YuchenLi committed 28fbafe (verified) · Parent: b0b54a6

Create README.md

Files changed (1): README.md (+87 −0)
---
datasets:
- cis519-Image2GPS/gps_images_dataset
base_model:
- cis519-Image2GPS/ImageToGPSproject_resnet18
---

Label normalization statistics:

- lat mean = 39.95169318421053
- lat std = 0.0007139636196696079
- lon mean = -75.19131129824562
- lon std = 0.0006948352800088026

The model outputs normalized coordinates; use these statistics to map predictions back to raw latitude/longitude, as in the sketch below and in the evaluation script.
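A minimal denormalization sketch (the variable names and the placeholder prediction are illustrative, not part of the model's API):

```python
# Map a normalized (lat, lon) prediction back to raw coordinates
# using the statistics listed above.
lat_mean, lat_std = 39.95169318421053, 0.0007139636196696079
lon_mean, lon_std = -75.19131129824562, 0.0006948352800088026

pred_norm = [0.0, 0.0]  # hypothetical normalized model output [lat, lon]
lat = pred_norm[0] * lat_std + lat_mean
lon = pred_norm[1] * lon_std + lon_mean
print(lat, lon)  # a zero-vector prediction maps back to the mean coordinates
```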
---
To load and evaluate our model:
```python
# Install dependencies (quiet mode)
!pip install -q torch torchvision datasets huggingface_hub transformers geopy

import getpass
from huggingface_hub import login

# Securely input the Hugging Face token and log in to the Hub
token = getpass.getpass("Enter your Hugging Face token: ")
login(token=token)

import torch
from huggingface_hub import hf_hub_download

# Specify the repository and the filename of the model to load
repo_id = "cis519-Image2GPS/ImageToGPSproject_resnet18_layer"  # Replace with your repo name
filename = "resnet_gps_regressor_complete.pth"
model_path = hf_hub_download(repo_id=repo_id, filename=filename)

# Load the fully serialized model and set it to evaluation mode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_test = torch.load(model_path, map_location=device, weights_only=False)
model_test.to(device)
model_test.eval()

from datasets import load_dataset

# Load the released evaluation images
dataset_test = load_dataset("gydou/released_img", split="train")

import numpy as np
import torchvision.transforms as transforms
from geopy.distance import geodesic

# Training-time augmentation (shown for reference; not used at evaluation time)
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),      # Random crop and resize to 224x224
    transforms.RandomHorizontalFlip(),      # Random horizontal flip
    # transforms.RandomRotation(degrees=15),  # Random rotation between -15 and 15 degrees
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),  # Random color jitter
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Deterministic preprocessing for inference/evaluation
inference_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Label normalization statistics (see above)
lat_mean = 39.95169318421053
lat_std = 0.0007139636196696079
lon_mean = -75.19131129824562
lon_std = 0.0006948352800088026

# Evaluate: predict, denormalize, and measure the geodesic error in meters
all_distances = []
with torch.no_grad():
    for data in dataset_test:
        image = inference_transform(data["image"]).unsqueeze(0).to(device)
        outputs = model_test(image).cpu().numpy()
        preds_denorm = outputs * np.array([lat_std, lon_std]) + np.array([lat_mean, lon_mean])
        actual = [data["Latitude"], data["Longitude"]]
        distance = geodesic(actual, preds_denorm[0]).meters
        all_distances.append(distance)

mean_error = np.mean(all_distances)
rmse_error = np.sqrt(np.mean(np.square(all_distances)))
print('mean_error: ', mean_error)
print('rmse_error: ', rmse_error)
```
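For quick spot checks, a minimal single-image inference sketch; the file path `my_photo.jpg` is hypothetical, and the snippet reuses `model_test`, `inference_transform`, `device`, and the normalization statistics defined above:

```python
from PIL import Image

# Hypothetical local image; replace with your own file
img = Image.open("my_photo.jpg").convert("RGB")

with torch.no_grad():
    x = inference_transform(img).unsqueeze(0).to(device)
    out = model_test(x).cpu().numpy()[0]

# Denormalize back to raw coordinates
pred_lat = out[0] * lat_std + lat_mean
pred_lon = out[1] * lon_std + lon_mean
print(f"Predicted GPS: ({pred_lat:.6f}, {pred_lon:.6f})")
```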