Datasets: sartajbhuvaji
committed on
Commit • 3741a28
1 Parent(s): 7589982
Uploading mini dataset
Browse files
- mini/README.md +40 -0
- mini/training_data-mini.npy +3 -0
- mini/training_data_stats.py +99 -0
mini/README.md
ADDED
@@ -0,0 +1,40 @@
### Info
- Image Resolution : 270, 480
- Mode : RGB
- Dimension : (270, 480, 3)
- File Count : 01
- Size : 1.81 GB

### Data Count
```
'W': [1, 0, 0, 0, 0, 0, 0, 0, 0] : 3627
'S': [0, 1, 0, 0, 0, 0, 0, 0, 0] : 50
'A': [0, 0, 1, 0, 0, 0, 0, 0, 0] : 104
'D': [0, 0, 0, 1, 0, 0, 0, 0, 0] : 106
'WA': [0, 0, 0, 0, 1, 0, 0, 0, 0] : 364
'WD': [0, 0, 0, 0, 0, 1, 0, 0, 0] : 416
'SA': [0, 0, 0, 0, 0, 0, 1, 0, 0] : 35
'SD': [0, 0, 0, 0, 0, 0, 0, 1, 0] : 47
'NK': [0, 0, 0, 0, 0, 0, 0, 0, 1] : 248
NONE : 3
```

### Graphics Details
- Original Resolution : 800 x 600
- Aspect Ratio : 16:10
- All Video Settings : Low

### Camera Details
- Camera : Hood Cam
- Vehicle Camera Height : Low
- First Person Vehicle Auto-Center : On
- First Person Head Bobbing : Off

### Other Details
- Vehicle : Michael's Car
- Vehicle Mods : All Max
- Cv2 Mask : None
- Waypoint : Enabled/Following
- Weather Conditions : Mostly Sunny
- Time of Day : Day, Night
- Rain : Some
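For a quick sanity check of the mini split, a minimal loading sketch follows. It assumes `training_data-mini.npy` sits in the working directory and that, as described above and in `training_data_stats.py`, each record is a `(frame, one_hot_label)` pair.

```python
# Minimal sketch: inspect the mini split (assumes the file is in the working directory).
import numpy as np
from collections import Counter

data = np.load('training_data-mini.npy', allow_pickle=True)

frame, label = data[0]
print(frame.shape)   # expected (270, 480, 3), RGB
print(label)         # e.g. [1, 0, 0, 0, 0, 0, 0, 0, 0] for 'W'

# Label distribution; should match the Data Count table above.
print(Counter(str(record[1]) for record in data))
```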
mini/training_data-mini.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:489bbaecce52d879c1f6b629d7e49171db26a2cbbd81ca754da4a79cde7377e6
size 1944328936
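The `.npy` is stored via Git LFS, so the three lines above are only the pointer, not the ~1.81 GB payload. A hedged download sketch with `huggingface_hub` is shown below; the `repo_id` is a placeholder, since the full dataset name is not shown on this page, and cloning the repo followed by `git lfs pull` works just as well.

```python
# Hypothetical download sketch; replace repo_id with the actual dataset name.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="sartajbhuvaji/<dataset-name>",  # placeholder, not a real repo id
    filename="mini/training_data-mini.npy",
    repo_type="dataset",
)
print(path)  # local cache path of the ~1.81 GB file
```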
mini/training_data_stats.py
ADDED
@@ -0,0 +1,99 @@
# training_data_stats.py
import cv2
import numpy as np
import time
from collections import Counter
import pandas as pd

def get_count_choices(a, b):
    # Counts how often each one-hot key-press label appears across the loaded data.
    total_count_choices = Counter()
    for i in range(a, b):
        training_data = np.load('training_data-mini.npy', allow_pickle=True)
        choices = [str(data[1]) for data in training_data]
        total_count_choices.update(choices)
    count_choices_dict = dict(total_count_choices)
    print(count_choices_dict)

def get_count_choices_per_file(a, b):
    # Builds a per-file table of key-press counts and writes it to CSV.
    df = pd.DataFrame(columns=['File', 'W', 'S', 'A', 'D', 'WA', 'WD', 'SA', 'SD', 'NK', 'NONE'])
    choice_to_column = {'[1, 0, 0, 0, 0, 0, 0, 0, 0]': 'W',
                        '[0, 1, 0, 0, 0, 0, 0, 0, 0]': 'S',
                        '[0, 0, 1, 0, 0, 0, 0, 0, 0]': 'A',
                        '[0, 0, 0, 1, 0, 0, 0, 0, 0]': 'D',
                        '[0, 0, 0, 0, 1, 0, 0, 0, 0]': 'WA',
                        '[0, 0, 0, 0, 0, 1, 0, 0, 0]': 'WD',
                        '[0, 0, 0, 0, 0, 0, 1, 0, 0]': 'SA',
                        '[0, 0, 0, 0, 0, 0, 0, 1, 0]': 'SD',
                        '[0, 0, 0, 0, 0, 0, 0, 0, 1]': 'NK',
                        'None': 'NONE'}
    for i in range(a, b):
        # The mini split ships a single file, so every iteration loads the same array.
        training_data = np.load('training_data-mini.npy', allow_pickle=True)
        choices = [str(data[1]) for data in training_data]
        count_choices_dict = dict(Counter(choices))
        # DataFrame.append was removed in pandas 2.x; assign the new row by index instead.
        df.loc[i - a, 'File'] = f'training_data-{i}.npy'
        for key, count in count_choices_dict.items():
            # str(None) == 'None', which choice_to_column maps to the 'NONE' column.
            df.loc[i - a, choice_to_column[key]] = count
    df.replace(np.nan, 0, inplace=True)
    df.to_csv('training_data_count_101-200.csv', index=False)


def roi(img, vertices):
    # Applies ROI mask to the image.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, color=[255, 255, 255])
    masked = cv2.bitwise_and(img, mask)
    return masked

def display_training_data(n):
    '''
    Displays the recorded frames together with their key-press labels.
    '''
    training_data = np.load(f'training_data-{n}.npy', allow_pickle=True)
    mask = False  # True

    if mask:
        # Masking region of interest
        vertices = np.array([[0, 25], [0, 270], [100, 270], [100, 200],
                             [430, 200], [430, 270], [480, 270], [480, 25]], np.int32)

    for data in training_data:
        img = data[0]
        choice = data[1]

        if mask:
            img = roi(img, [vertices])

        cv2.imshow('screen', img)
        print(choice)
        print(img.shape)

        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    start_time = time.time()
    #get_count_choices(1, 2)
    get_count_choices_per_file(1, 2)
    #display_training_data('mini')
    print(f'Elapsed time: {time.time() - start_time} seconds')

# Output:
'''
'W': [1, 0, 0, 0, 0, 0, 0, 0, 0] : 3627
'S': [0, 1, 0, 0, 0, 0, 0, 0, 0] : 50
'A': [0, 0, 1, 0, 0, 0, 0, 0, 0] : 104
'D': [0, 0, 0, 1, 0, 0, 0, 0, 0] : 106
'WA': [0, 0, 0, 0, 1, 0, 0, 0, 0] : 364
'WD': [0, 0, 0, 0, 0, 1, 0, 0, 0] : 416
'SA': [0, 0, 0, 0, 0, 0, 1, 0, 0] : 35
'SD': [0, 0, 0, 0, 0, 0, 0, 1, 0] : 47
'NK': [0, 0, 0, 0, 0, 0, 0, 0, 1] : 248
NONE : 3
'''
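Running the script as committed calls `get_count_choices_per_file(1, 2)`, which writes `training_data_count_101-200.csv` (the filename appears to be carried over from a larger run). A small sketch for reading that CSV back and checking the class balance, assuming it was produced by the function above:

```python
# Sketch: summarize the per-file counts written by get_count_choices_per_file.
import pandas as pd

df = pd.read_csv('training_data_count_101-200.csv')
key_cols = ['W', 'S', 'A', 'D', 'WA', 'WD', 'SA', 'SD', 'NK', 'NONE']
totals = df[key_cols].sum()
print(totals)                             # per-key totals, e.g. W = 3627 for the mini file
print((totals / totals.sum()).round(3))   # class proportions; 'W' dominates the mini split
```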