Spaces:
Runtime error
Runtime error
File size: 7,073 Bytes
38a0b0b c7a0e20 38a0b0b c7a0e20 38a0b0b c7a0e20 38a0b0b e726183 38a0b0b e726183 38a0b0b e726183 38a0b0b e726183 38a0b0b e726183 38a0b0b e726183 38a0b0b e726183 38a0b0b e726183 38a0b0b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 |
# # %%bash
# # # git lfs install
# # # git clone https://huggingface.co/spaces/Xhaheen/meme_world
# # # pip install -r /content/meme_world/requirements.txt
# # # pip install gradio
# # cd /meme_world
# import torch
# import re
# import gradio as gr
# from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
# import cohere
# import os
# #
# # os.environ['key_srkian'] = ''
# key_srkian = os.environ["key_srkian"]
# co = cohere.Client(key_srkian)#srkian
# device='cpu'
# encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
# decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
# model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
# feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
# tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
# model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
# def predict(department,image,max_length=64, num_beams=4):
# image = image.convert('RGB')
# image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
# clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0]
# caption_ids = model.generate(image, max_length = max_length)[0]
# caption_text = clean_text(tokenizer.decode(caption_ids))
# dept=department
# context= caption_text
# response = co.generate(
# model='large',
# prompt=f'create non offensive one line meme for given department and context\n\ndepartment- data science\ncontext-a man sitting on a bench with a laptop\nmeme- \"I\'m not a data scientist, but I play one on my laptop.\"\n\ndepartment-startup\ncontext-a young boy is smiling while using a laptop\nmeme-\"When your startup gets funded and you can finally afford a new laptop\"\n\ndepartment- {dept}\ncontext-{context}\nmeme-',
# max_tokens=20,
# temperature=0.8,
# k=0,
# p=0.75,
# frequency_penalty=0,
# presence_penalty=0,
# stop_sequences=["department"],
# return_likelihoods='NONE')
# reponse=response.generations[0].text
# reponse = reponse.replace("department", "")
# Feedback_SQL="DEPT"+dept+"CAPT"+caption_text+"MAMAY"+reponse
# return reponse
# # input = gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True)
# output = gr.outputs.Textbox(type="text",label="Meme")
# #examples = [f"example{i}.jpg" for i in range(1,7)]
# #examples = os.listdir()
# examples = [f"example{i}.png" for i in range(1,7)]
# #examples=os.listdir()
# #for fichier in examples:
# # if not(fichier.endswith(".png")):
# # examples.remove(fichier)
# description= " Looking for a fun and easy way to generate memes? Look no further than Meme world! Leveraging large language models like GPT-3PT-3 / Ai21 / Cohere, you can create memes that are sure to be a hit with your friends or network. Created with ♥️ by Arsalan @[Xaheen](https://www.linkedin.com/in/sallu-mandya/). kindly share your thoughts in discussion session and use the app responsibly #NO_Offense \n \n built with ❤️ @[Xhaheen](https://www.linkedin.com/in/sallu-mandya/)"
# title = "Meme world 🖼️"
# dropdown=["data science", "product management","marketing","startup" ,"agile","crypto" , "SEO" ]
# article = "Created By : Xaheen "
# interface = gr.Interface(
# fn=predict,
# inputs = [gr.inputs.Dropdown(dropdown),gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True)],
# theme="grass",
# outputs=output,
# examples =[['data science', 'example5.png'],
# ['product management', 'example2.png'],
# ['startup', 'example3.png'],
# ['marketing', 'example4.png'],
# ['agile', 'example1.png'],
# ['crypto', 'example6.png']],
# title=title,
# description=description,
# article = article,
# )
# interface.launch(debug=True)
# Step 2: Set up the Gradio interface and import necessary packages
import os

import gradio as gr
import openai
import torch
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
# Step 3: Load the provided image captioning model
# A single ViT-GPT2 checkpoint supplies the encoder/decoder weights plus the
# matching image preprocessor and tokenizer.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# Use GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Step 4: Create a function to generate captions from images
# Beam-search decoding parameters shared by every model.generate() call below.
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
def generate_caption(image):
    """Generate an English caption for an image using the ViT-GPT2 model.

    Args:
        image: Either a numpy array of shape (H, W, 3) as delivered by the
            Gradio image input, or an already-constructed ``PIL.Image``.
            (The original only accepted arrays; PIL input would have crashed
            on ``.astype``.)

    Returns:
        str: The decoded caption with special tokens stripped and
        surrounding whitespace removed.
    """
    # Gradio hands uploads over as numpy arrays; convert those to PIL.
    # Accept PIL images directly as well so the function is reusable.
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image.astype('uint8'), 'RGB')
    # Normalize palettes / grayscale / RGBA to RGB, which the ViT
    # preprocessor expects.
    if image.mode != "RGB":
        image = image.convert(mode="RGB")
    pixel_values = feature_extractor(images=[image], return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device)
    output_ids = model.generate(pixel_values, **gen_kwargs)
    caption = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
    return caption
# Step 5: Create a function to generate memes using the GPT-3 API
def generate_meme(caption, department):
    """Ask the OpenAI completion API for a non-offensive meme caption.

    Args:
        caption: Image description string produced by ``generate_caption``.
        department: Department name chosen from the UI dropdown.

    Returns:
        str: The generated meme caption, stripped of surrounding whitespace.

    Raises:
        KeyError: If the ``key`` environment variable (OpenAI API key) is
            not set.
    """
    # NOTE: the original script used os.environ without importing os, which
    # raised NameError at runtime; os is now imported at module level.
    openai.api_key = os.environ["key"]
    prompt = f"Create a non-offensive meme caption for the following image description in the context of {department} department: {caption}"
    # Legacy Completions endpoint (text-davinci-002); one short completion.
    response = openai.Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=50, n=1, stop=None, temperature=0.7)
    meme_caption = response.choices[0].text.strip()
    return meme_caption
# Step 6: Define the main meme generation function
def meme_generator(image, department):
    """End-to-end pipeline: caption the uploaded image, then meme-ify it.

    Args:
        image: Uploaded image (numpy array from the Gradio input).
        department: Department name selected in the dropdown.

    Returns:
        str: The department-flavored meme caption.
    """
    return generate_meme(generate_caption(image), department)
# NOTE(review): this list appears unused — the gr.Interface call below passes
# its own inline `examples` — confirm before removing.
examples = [f"example{i}.png" for i in range(1,7)]
# Step 7: Launch the Gradio application
# Legacy Gradio v2-style components (gr.inputs / gr.outputs namespaces).
image_input = gr.inputs.Image()
department_input = gr.inputs.Dropdown(choices=["data science", "product management","marketing","startup" ,"agile","crypto" , "SEO" ])
output_text = gr.outputs.Textbox()
# Build the UI and start the server; debug=True surfaces errors in the console.
gr.Interface(fn=meme_generator, inputs=[image_input, department_input], outputs=output_text, title="Meme world!",description= " Looking for a fun and easy way to generate memes? Look no further than Meme world! Leveraging large language models like GPT-3PT-3 / Ai21 / Cohere, you can create memes that are sure to be a hit with your friends or network. Created with ♥️ by Arsalan @[Xaheen](https://www.linkedin.com/in/sallu-mandya/). kindly share your thoughts in discussion session and use the app responsibly #NO_Offense \n \n built with ❤️ @[Xhaheen](https://www.linkedin.com/in/sallu-mandya/)", theme="grass",
examples =[['example5.png','data science' ],
           ['example2.png','product management'],
           ['example3.png','startup'],
           ['example4.png','marketing'],
           ['example1.png','agile'],
           ['example6.png','crypto']]).launch(debug=True)
|