# Bulk-download recent arXiv computer-science (cs.*) submissions: page through
# the arXiv API newest-first, then fetch each paper's source archive and
# metadata in parallel.
import json
import requests
import sys
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
import logging
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
import re
import feedparser
import time


MONTHS_BACK = 99
OUTPUT_DIR = 'arxiv_dumps'
JSONL_FILE = 'downloaded_papers.jsonl'
CACHE_DIR = 'cache'
SEARCH_CACHE_FILE = os.path.join(CACHE_DIR, 'search_cache.jsonl')
MAX_RESULTS_PER_PAGE = 100
MAX_BACKOFF = 120
CONCURRENT_DOWNLOADS = min(32, cpu_count())
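

# Configure logging: write DEBUG-level messages to arxivdump.log when enabled,
# otherwise suppress logging output entirely.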
def setup_logging(enable_logging: bool):
    if enable_logging:
        logging.basicConfig(filename='arxivdump.log', filemode='a',
                            format='%(asctime)s - %(levelname)s - %(message)s',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.CRITICAL)
        logging.disable(logging.CRITICAL)


def sanitize_filename(name):
    return re.sub(r'[^\w\-]', '', name.replace(' ', '_'))
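

# The download cache is a JSONL file with one metadata record per fetched
# paper; loading it yields the set of entry ids that can be skipped on reruns.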
def load_cache(file_path):
    if not os.path.exists(file_path):
        return set()
    with open(file_path, 'r', encoding='utf-8') as f:
        return set(json.loads(line).get('id') for line in f)
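

# Append one metadata record to a JSONL cache file, creating its parent
# directory if the path includes one.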
def update_cache(file_path, metadata):
    # os.path.dirname() is empty for bare file names such as JSONL_FILE, and
    # os.makedirs('') raises, so only create a directory when one is present.
    parent_dir = os.path.dirname(file_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(file_path, 'a', encoding='utf-8') as f:
        f.write(json.dumps(metadata, ensure_ascii=False) + '\n')
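

# Fetch the source archive for a single paper from export.arxiv.org, retrying
# failed requests with exponential backoff capped at MAX_BACKOFF seconds.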
def fetch_paper_source(entry_id, headers):
    # Entry ids from the API may use either http or https; normalize the host
    # to export.arxiv.org and rewrite /abs/ to /src/ to reach the source archive.
    src_url = re.sub(r'^https?://arxiv\.org/', 'https://export.arxiv.org/', entry_id).replace('/abs/', '/src/')
    backoff = 1

    while True:
        try:
            response = requests.get(src_url, headers=headers, allow_redirects=True, timeout=60)
            content_type = response.headers.get('Content-Type', '').lower()
            content_disposition = response.headers.get('Content-Disposition', '')

            if 'filename=' in content_disposition:
                filename = content_disposition.split('filename=')[-1].strip('"')
            else:
                filename = f"{entry_id.split('/')[-1]}.unlabeled_file_type"

            return (response.content, content_type, dict(response.headers), filename)

        except requests.exceptions.RequestException as e:
            logging.warning(f"Request failed for {entry_id}: {e}. Retrying in {backoff} seconds.")
            time.sleep(backoff)
            backoff = min(backoff * 2, MAX_BACKOFF)
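

# Save the downloaded archive and a metadata.json into a per-paper directory
# under OUTPUT_DIR, then append the metadata to the shared JSONL download cache.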
def save_archive_and_metadata(content, content_type, response_headers, filename, paper_title, index, jsonl_file, paper):
    sanitized_title = sanitize_filename(paper_title)
    paper_dir = os.path.join(OUTPUT_DIR, f"{index}_{sanitized_title}")
    os.makedirs(paper_dir, exist_ok=True)

    if content and filename:
        archive_path = os.path.join(paper_dir, filename)
        with open(archive_path, 'wb') as f:
            f.write(content)
        logging.info(f"Saved archive to: {archive_path}")
    else:
        logging.warning(f"No archive content to save for paper {index}_{sanitized_title}.")

    metadata = {
        "id": paper['id'],
        "title": paper['title'],
        "authors": [author['name'] for author in paper['authors']],
        "summary": paper['summary'],
        "categories": [tag['term'] for tag in paper['tags']],
        "published": paper['published'],
        "updated": paper['updated'],
        "links": paper['links'],
        "source_response_headers": response_headers,
        "downloaded_at": datetime.utcnow().isoformat() + 'Z'
    }

    metadata_path = os.path.join(paper_dir, 'metadata.json')
    with open(metadata_path, 'w', encoding='utf-8') as f:
        json.dump(metadata, f, ensure_ascii=False, indent=4)

    update_cache(jsonl_file, metadata)
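

# Append the parsed search results to a JSONL cache under CACHE_DIR.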
def cache_search_results(results):
    os.makedirs(CACHE_DIR, exist_ok=True)
    with open(SEARCH_CACHE_FILE, 'a', encoding='utf-8') as f:
        for paper in results:
            f.write(json.dumps({
                'id': paper['id'],
                'title': paper['title'],
                'published': paper['published'],
                'updated': paper['updated'],
                'summary': paper['summary'],
                'authors': [author['name'] for author in paper['authors']],
                'categories': [tag['term'] for tag in paper['tags']],
                'links': paper['links']
            }, ensure_ascii=False) + '\n')
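

# Parse the Atom feed returned by the arXiv API into plain dictionaries.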
def parse_atom_feed(feed_content):
    parsed_feed = feedparser.parse(feed_content)
    results = []

    for entry in parsed_feed.entries:
        links_dict = {link.get('title', link.get('rel', 'unknown')): link.get('href') for link in entry.links}
        results.append({
            'id': entry.id,
            'title': entry.title,
            'summary': entry.summary,
            'published': entry.published,
            'updated': entry.updated,
            'tags': [{'term': tag.term} for tag in entry.get('tags', [])],
            'authors': [{'name': author.name} for author in entry.authors],
            'links': links_dict
        })

    return results
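

# Worker function run in the multiprocessing pool: skip papers that are already
# cached, otherwise download the source and persist it together with metadata.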
def process_paper(args):
    index, paper, headers, jsonl_file, cached_ids = args
    entry_id = paper['id']
    paper_id = entry_id.split('/')[-1]

    # The JSONL cache stores the full entry id, so compare against entry_id
    # rather than the short paper_id, which would never match.
    if entry_id in cached_ids:
        return None

    try:
        content, content_type, response_headers, filename = fetch_paper_source(entry_id, headers)
        if content and content_type:
            save_archive_and_metadata(content, content_type, response_headers, filename, paper['title'], index, jsonl_file, paper)
            return paper['published']
    except Exception as e:
        logging.error(f"Exception occurred while processing paper {paper_id}: {e}")

    return None
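

# Query the arXiv API page by page (newest first) until the cutoff date is
# reached, then download every collected paper in parallel.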
def main():
    setup_logging(True)

    print(f"Starting arXiv paper download for the past {MONTHS_BACK} months")
    logging.info(f"Starting arXiv paper download for the past {MONTHS_BACK} months")

    query = "cat:cs.*"
    sort_by = "submittedDate"
    sort_order = "descending"
    print(f"Search query: {query} | sortBy={sort_by} | sortOrder={sort_order}")
    logging.info(f"Search query: {query} | sortBy={sort_by} | sortOrder={sort_order}")

    start = 0
    papers = []
    cutoff_date = datetime.now() - relativedelta(months=MONTHS_BACK)
    reached_cutoff = False
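
    # Page through the search results until a paper older than the cutoff
    # date appears or the API stops returning results.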
    while not reached_cutoff:
        search_url = f"http://export.arxiv.org/api/query?search_query={query}&sortBy={sort_by}&sortOrder={sort_order}&start={start}&max_results={MAX_RESULTS_PER_PAGE}"
        print(f"Fetching page starting from {start}: {search_url}")
        logging.info(f"Fetching page starting from {start}: {search_url}")

        response = requests.get(search_url, timeout=60)
        parsed_results = parse_atom_feed(response.content)
        if not parsed_results:
            break

        for paper in parsed_results:
            published_date = datetime.strptime(paper['published'], "%Y-%m-%dT%H:%M:%SZ")
            if published_date < cutoff_date:
                reached_cutoff = True
                break
            papers.append(paper)

        cache_search_results(parsed_results)
        start += len(parsed_results)

        if len(parsed_results) < MAX_RESULTS_PER_PAGE:
            break

        # Pause between pages, as the arXiv API asks clients to space out queries.
        time.sleep(3)
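
    # Browser-style request headers used for the source downloads.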
    headers = {
        "User-Agent": "Mozilla/5.0 (compatible; ArXivDownloader/1.0; +https://github.com/yourusername/arxiv-downloader)",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Referer": "https://arxiv.org/",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }

    cached_ids = load_cache(JSONL_FILE)

    print(f"Starting to process {len(papers)} papers with multiprocessing...")
    logging.info(f"Starting to process {len(papers)} papers with multiprocessing.")
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    args_list = [(index, paper, headers, JSONL_FILE, cached_ids) for index, paper in enumerate(papers, start=1)]
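
    # Fan the downloads out across a process pool; imap_unordered lets the
    # progress bar advance as soon as any worker finishes.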
    with Pool(processes=CONCURRENT_DOWNLOADS) as pool:
        list(tqdm(pool.imap_unordered(process_paper, args_list), total=len(papers), desc="Downloading papers"))

    print("Processing complete.")
    logging.info("Processing complete.")


if __name__ == "__main__":
    main()