---
datasets:
- cis519-Image2GPS/gps_images_dataset
base_model:
- cis519-Image2GPS/ImageToGPSproject_resnet18
---

Normalization statistics for the latitude/longitude targets (used to de-normalize model outputs below):

- lat mean = 39.95169318421053
- lat std = 0.0007139636196696079
- lon mean = -75.19131129824562
- lon std = 0.0006948352800088026

---

To load and evaluate our model:

```python
# Install dependencies quietly
!pip install -q torch torchvision datasets transformers huggingface_hub geopy

import getpass

import numpy as np
import torch
import torchvision.transforms as transforms
from datasets import load_dataset
from geopy.distance import geodesic
from huggingface_hub import hf_hub_download, login

# Securely input the Hugging Face token and log in to the Hub
token = getpass.getpass("Enter your Hugging Face token: ")
login(token=token)

# Repository and filename of the saved model
repo_id = "cis519-Image2GPS/ImageToGPSproject_resnet18_layer"  # replace with your repo name
filename = "resnet_gps_regressor_complete.pth"

model_path = hf_hub_download(repo_id=repo_id, filename=filename)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the fully pickled model (not just a state_dict) and set it to evaluation mode
model_test = torch.load(model_path, map_location=device, weights_only=False)
model_test.to(device)
model_test.eval()

# Held-out images with ground-truth GPS coordinates
dataset_test = load_dataset("gydou/released_img", split="train")

# Training-time augmentations (kept for reference; not used at evaluation)
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),   # random crop and resize to 224x224
    transforms.RandomHorizontalFlip(),   # random horizontal flip
    # transforms.RandomRotation(degrees=15),  # random rotation between -15 and 15 degrees
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),  # random color jitter
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Deterministic preprocessing for inference/evaluation
inference_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Normalization statistics used during training (see above)
lat_mean = 39.95169318421053
lat_std = 0.0007139636196696079
lon_mean = -75.19131129824562
lon_std = 0.0006948352800088026

all_distances = []

with torch.no_grad():
    for data in dataset_test:
        image = inference_transform(data["image"]).unsqueeze(0).to(device)
        outputs = model_test(image).cpu().numpy()
        # Undo the latitude/longitude normalization applied during training
        preds_denorm = outputs * np.array([lat_std, lon_std]) + np.array([lat_mean, lon_mean])
        # print("Predicted latitude & longitude:", preds_denorm[0])
        actual = [data["Latitude"], data["Longitude"]]
        # Geodesic distance (in meters) between predicted and true coordinates
        distance = geodesic(actual, preds_denorm[0]).meters
        all_distances.append(distance)

mean_error = np.mean(all_distances)
rmse_error = np.sqrt(np.mean(np.square(all_distances)))
print("mean_error (m): ", mean_error)
print("rmse_error (m): ", rmse_error)
```
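
As a quick sanity check, you can also run the model on a single photo instead of the full evaluation loop. This is a minimal sketch that assumes the cell above has already been executed (so `model_test`, `inference_transform`, `device`, and the normalization statistics are defined); the file name `my_photo.jpg` is only a placeholder.

```python
from PIL import Image

# Placeholder path; replace with your own image of the area covered by the training data
img = Image.open("my_photo.jpg").convert("RGB")

with torch.no_grad():
    x = inference_transform(img).unsqueeze(0).to(device)
    pred = model_test(x).cpu().numpy()[0]

# Undo the target normalization to recover latitude/longitude in degrees
lat = pred[0] * lat_std + lat_mean
lon = pred[1] * lon_std + lon_mean
print(f"Predicted GPS: ({lat:.6f}, {lon:.6f})")
```

Because the targets were normalized per coordinate during training, the same mean/std pairs listed above must always be applied to convert raw model outputs back into degrees.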