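"""Generate an arXivGPT-style summary image for an arXiv paper.

The script downloads the paper's PDF, extracts its text with PyMuPDF, asks an
OpenAI model for a three-bullet summary, renders that summary onto a background
image with Pillow, and exposes the whole pipeline through a small Gradio UI.
"""
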
import gradio as gr
import requests
from io import BytesIO
import os
import json
from dotenv import load_dotenv
from PIL import Image, ImageDraw, ImageFont
import fitz  # PyMuPDF
import arxiv
import tiktoken
from openai import OpenAI
import textwrap
from datetime import datetime

# Load environment variables from .env file
load_dotenv()

openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

def download_and_extract_paper_info(arxiv_id, token_limit=120000, model="gpt-3.5-turbo"):
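    """Fetch the paper's metadata and PDF text from arXiv, truncating the text to roughly token_limit tokens."""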
    search = arxiv.Search(id_list=[arxiv_id])
    paper = next(arxiv.Client().results(search))  # Client.results() replaces the deprecated Search.results()
    
    title = paper.title
    publish_date = paper.published.date()
    
    pdf_url = f"https://arxiv.org/pdf/{arxiv_id}.pdf"
    response = requests.get(pdf_url)
    if response.status_code == 200:
        pdf_content = response.content
        
        doc = fitz.open(stream=pdf_content, filetype="pdf")
        text = ""
        encoding = tiktoken.encoding_for_model(model)
        
        for page in doc:
            page_text = page.get_text()
            text += page_text
            
            # Stop once the accumulated text exceeds the token budget, truncating to the limit
            tokens = encoding.encode(text)
            if len(tokens) > token_limit:
                text = encoding.decode(tokens[:token_limit])
                break
        
        return {
            "title": title,
            "publish_date": publish_date,
            "full_text": text
        }
    else:
        print(f"Failed to download paper. Status code: {response.status_code}")
        return None

def summarize_text(text):
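    """Ask the OpenAI chat API for a three-bullet JSON summary of the paper text."""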
    prompt = f""" 
    You are getting the text version of an arxiv paper your goal is to provide a summary of the paper by providing bullet points which summarise the paper. 

    It should be exact three bullet points which summarise the paper. Return your response in JSON format where the keys are the bullet points and the values are the summaries of the bullet points as following:

    {{
    "bullet_point_1": "content",
    "bullet_point_2": "content",
    "bullet_point_3": "content"
    }}

    Here is the text of the paper:

    {text}
    """

    completion = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        response_format={ "type": "json_object" },
        messages=[
            {"role": "user", "content": prompt}
        ],
        temperature=0.0,
    )

    summary = completion.choices[0].message.content
    return summary

def add_text_to_image(background_path, title, text_content, publish_date, output_path="output.jpg", scale_factor=2, offset=20):
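    """Render the title, bullet-point summary, publish date, and @arXivGPT watermark
    onto the background image, drawing at scale_factor x resolution for sharper output.
    """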
    with Image.open(background_path) as img:
        width, height = img.size
        background = img.resize((width * scale_factor, height * scale_factor), Image.LANCZOS)
    
    draw = ImageDraw.Draw(background)

    title_font = ImageFont.truetype("fonts/Inika-Regular.ttf", 35 * scale_factor)
    content_font = ImageFont.truetype("fonts/Inika-Regular.ttf", 20 * scale_factor)
    date_font = ImageFont.truetype("fonts/Inika-Regular.ttf", 20 * scale_factor)
    arxiv_font = ImageFont.truetype("fonts/Larabieb.ttf", 50 * scale_factor)

    margin = 50 * scale_factor
    max_width = background.width - (2 * margin)

    # Dynamically calculate the width for wrapping the title
    wrapped_title = textwrap.wrap(title, width=int(max_width / (35 * scale_factor * 0.6)))
    y_text = 50 * scale_factor

    for line in wrapped_title:
        bbox = title_font.getbbox(line)
        line_width = bbox[2] - bbox[0]
        line_height = bbox[3] - bbox[1]
        x_text = (background.width - line_width) // 2
        draw.text((x_text, y_text), line, font=title_font, fill=(0, 0, 0))
        y_text += line_height + (10 * scale_factor)

    # Parse the JSON summary and centre the bullet-point block on the canvas
    bullet_points = json.loads(text_content)
    total_height = sum(len(textwrap.wrap(value, width=90)) * (25 * scale_factor) + (20 * scale_factor) for value in bullet_points.values())
    y = (background.height - total_height) // 2
    bullet_width = content_font.getbbox("• ")[2]
    max_content_width = max(max(content_font.getbbox(line)[2] for line in textwrap.wrap(value, width=90)) for value in bullet_points.values())
    bullet_start_x = (background.width - max_content_width - bullet_width) // 2

    for value in bullet_points.values():
        wrapped_text = textwrap.wrap(value, width=90)
        
        for i, line in enumerate(wrapped_text):
            if i == 0:
                draw.text((bullet_start_x, y), "•", font=content_font, fill=(0, 0, 0))
                draw.text((bullet_start_x + bullet_width, y), line, font=content_font, fill=(0, 0, 0))
            else:
                draw.text((bullet_start_x + bullet_width, y + (25 * scale_factor * i)), line, font=content_font, fill=(0, 0, 0))
        
        y += (25 * scale_factor * len(wrapped_text)) + (20 * scale_factor)

    date_text = f"Published: {publish_date}"
    date_bbox = date_font.getbbox(date_text)
    date_height = date_bbox[3] - date_bbox[1]
    draw.text((margin, background.height - margin - date_height - offset), date_text, font=date_font, fill=(0, 0, 0))

    arxiv_text = "@arXivGPT"
    arxiv_bbox = arxiv_font.getbbox(arxiv_text)
    arxiv_width = arxiv_bbox[2] - arxiv_bbox[0]
    arxiv_height = arxiv_bbox[3] - arxiv_bbox[1]
    arxiv_x = background.width - margin - arxiv_width
    arxiv_y = background.height - margin - arxiv_height - offset

    pre_x_text = "@ar"
    pre_x_width = arxiv_font.getbbox(pre_x_text)[2]
    draw.text((arxiv_x, arxiv_y), pre_x_text, font=arxiv_font, fill=(0, 0, 0))

    x_text = "X"
    x_width = arxiv_font.getbbox(x_text)[2]
    draw.text((arxiv_x + pre_x_width, arxiv_y), x_text, font=arxiv_font, fill="#B31B1B")

    post_x_text = "ivGPT"
    draw.text((arxiv_x + pre_x_width + x_width, arxiv_y), post_x_text, font=arxiv_font, fill=(0, 0, 0))

    background.save(output_path, quality=95)
    print(f"High-resolution image saved as {output_path}")

def create_image_from_url(arxiv_id, background_path="background.jpg", output_path="output.jpg"):
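    """Full pipeline for one arXiv ID: download the paper, summarise it, and render the summary image."""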
    paper_info = download_and_extract_paper_info(arxiv_id)
    if paper_info:
        title = paper_info.get("title")
        publish_date = paper_info.get("publish_date")
        full_text = paper_info.get("full_text")
        summary = summarize_text(full_text)
        add_text_to_image(background_path, title, summary, publish_date, output_path)
    return output_path

def fetch_arxiv_image(arxiv_link):
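    """Gradio handler: pull the arXiv ID from the link and return the generated summary image."""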
    arxiv_id = arxiv_link.split('/')[-1]
    output_path = create_image_from_url(arxiv_id)
    return Image.open(output_path)

description_text = (
    "Paste an arXiv link below. The paper is downloaded and summarised, and the summary is "
    "displayed in the arXivGPT format (https://x.com/arXivGPT). Only arXiv links are supported."
)

demo = gr.Interface(
    fn=fetch_arxiv_image,
    inputs="text",
    outputs="image",
    allow_flagging="never",
    description=description_text
)
demo.launch()