Publications by Patrick Ford
Red Flour Beetles (Tribolium castaneum) Choices
# Load necessary libraries pacman::p_load(pacman, readr, dplyr, ggplot2, tidyr, gridExtra) small_arena_file <- "small_arena_choice_test.csv" large_arena_file <- "large_arena_choice_test.csv" wind_tunnel_file <- "wind_tunnel_preference_tests_flours_2022.csv" # Load datasets small_arena_data <- read_csv(small_arena_file) ## Rows: 883 Columns: 7 ## ...
39 sym Python (7733 sym/26 pcs) 10 img
LLM vs Doctors
# Load necessary libraries pacman::p_load(pacman, readr, dplyr, ggplot2, gridExtra) # Data Frame with Metrics for Groups df <- data.frame( Outcome = c("Diagnostic Performance", "Diagnostic Performance", "Time per Case", "Time per Case", "LLM Alone", "LLM Alone"), Group = c("Conventional Resources", "With LLM", "Conventional Resources", "With L...
9 sym Python (6526 sym/3 pcs) 3 img
World Hum Map
# Load necessary libraries pacman::p_load(pacman, readr, dplyr, ggplot2, ggmap, sf, patchwork) # Load the data data <- read.csv("hummap_processed.csv") # List unique values in the gender column unique_genders <- unique(data$gender) print(unique_genders) ## [1] "Female" "Male" "Non Binary" # Use a world map overlay world_map <- map_data(...
7 sym 1 img
Children of the Stones; Sentiment Analysis
# Load necessary libraries pacman::p_load(pacman, tidytext, dplyr, tidyr, ggplot2, readr, topicmodels, gridExtra, wordcloud, RColorBrewer, quanteda, quanteda.textstats, grid) # Define the directory path and list of files file_path <- "/cloud/project" files <- c("Children_of_the_Stones_Full_Circle(7).csv", "Children_of_th...
14 sym Python (5710 sym/7 pcs) 4 img
Sentiment Analysis of Around the World in Eighty Days
# Load necessary libraries pacman::p_load(pacman, tidytext, dplyr, tidyr, ggplot2, readr, topicmodels, gridExtra, wordcloud, RColorBrewer, quanteda, quanteda.textstats) # Load the CSV file Eighty_data <- read_csv("Around_the_World_in_Eighty_Days.csv") ## Rows: 1703 Columns: 1 ## ── Column specification ──...
26 sym Python (5647 sym/19 pcs) 4 img
Optimised Travel Routes Between the 33 Most Populated Cities in the World; Nearest Neighbour (NN) vs Ant Colony Optimisation (ACO)
The routes chosen by the two algorithms in the code—Nearest Neighbour (NN) and Ant Colony Optimisation (ACO)—will behave differently in terms of reproducibility when the code is run multiple times. Nearest Neighbour (NN) The NN algorithm is deterministic, as it selects the next city to visit based on the smallest distance available from the c...
1789 sym Python (8380 sym/9 pcs) 1 img
The World Magnetic Model (WMM); Movement of Magnetic North and South Pole Locations (2000–2025); Globe
# Load necessary libraries pacman::p_load(pacman, readr, tidyverse, ggplot2, viridis, maps, mapproj) # Function to load, process, and split the dataset load_and_process_data <- function(file_path) { # Load the dataset data <- read.csv(file_path, header = FALSE, col.names = "Values") # Split the 'Values' column into Longitude, Latitude, and ...
7 sym 2 img
The World Magnetic Model (WMM); Movement of Magnetic North and South Pole Locations (2000–2025); 2D
# Load necessary libraries pacman::p_load(pacman, readr, tidyverse, ggplot2, viridis) # Function to load, process, and split the dataset load_and_process_data <- function(file_path) { # Load the dataset data <- read.csv(file_path, header = FALSE, col.names = "Values") # Split the 'Values' column into Longitude, Latitude, and Year data <- ...
7 sym 2 img
GSMNP Compass Declination
# Load necessary libraries pacman::p_load(pacman, readr, tidyverse, ggplot2, dplyr, gridExtra, sf, akima, rnaturalearth, rnaturalearthdata) # Load the dataset data <- read.csv("GRSM_COMPASS_DECLINATION_5646692726181302872.csv") # Convert the data into an sf object coordinates <- st_as_sf(data, coords = c("LON", "LAT"), crs = 4326) # Get natural ...
14 sym 5 img
North & South Pole Movements: IGRF 2D
# Load necessary libraries pacman::p_load(pacman, readr, tidyverse, ggplot2, viridis) # Function to load, process, and split the dataset load_and_process_data <- function(file_path) { # Load the dataset data <- read.csv(file_path, header = FALSE, col.names = "Values") # Split the 'Values' column into Longitude, Latitude, and Year data <- ...
7 sym Python (1885 sym/2 pcs) 2 img