Create data_collection.py
data_collection.py  +43 -0
ADDED
@@ -0,0 +1,43 @@
'''
This is an example of how the data was collected for a large subreddit (post level).
'''

import pandas as pd
import json

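# The submissions dump is split into eight newline-delimited JSON chunks
# (The_Donald_submissions.001 through .008); each chunk is parsed, filtered,
# and appended to master_df below.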
master_df = pd.DataFrame()
path = '/content/drive/MyDrive/project 1 data (sta 663)/The_Donald/'
chunks = []
for i in range(1, 9):
    chunks.append(path + 'The_Donald_submissions.00' + str(i))

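# Alternative: read a single un-chunked dump file instead: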
# chunks = ["/content/drive/MyDrive/project 1 data (sta 663)/The_Donald"]

# Read each chunk line by line; every line holds one submission as a JSON object
for chunk in chunks:
    data = []
    with open(chunk, 'r') as file:
        for line in file:
            try:
                # Parse the JSON line and append to the list
                data.append(json.loads(line))
            except json.JSONDecodeError as e:
                # Report the error and skip this line
                print(f"Error decoding JSON: {e}")  # usually due to post deletion or incorrect type

    df = pd.DataFrame(data)

    # convert 'created_utc' (Unix seconds) to a datetime column
    df['created_utc'] = pd.to_datetime(df['created_utc'], unit='s')

    # extract the year from the 'created_utc' column
    df['year'] = df['created_utc'].dt.year

    # keep posts from 2014 onward
    df_filtered = df[df['year'] >= 2014]

    master_df = pd.concat([master_df, df_filtered])
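    # Note: the concat above re-copies master_df on every iteration; with many
    # or larger chunks, collecting each df_filtered in a list and calling
    # pd.concat once after the loop would scale better.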

# sort and take the top 100 posts per year by score
master_df_sorted = master_df.sort_values(by=['year', 'score'], ascending=[True, False])
top_posts_per_year = master_df_sorted.groupby('year').head(100)
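
# Hypothetical follow-up (an assumption, not part of the original commit):
# persist the result so later analyses can load it without re-parsing the
# raw dumps. The output filename here is illustrative only.
top_posts_per_year.to_csv(path + 'top_posts_per_year.csv', index=False)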