24Sureshkumar committed on
Commit
af9f29e
·
verified Β·
1 Parent(s): 1d769ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -19
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  import requests
3
  from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer
@@ -18,19 +19,11 @@ translator_tokenizer = MarianTokenizer.from_pretrained(translator_model)
18
  generator_model = "EleutherAI/gpt-neo-1.3B"
19
  generator = AutoModelForCausalLM.from_pretrained(generator_model).to(device)
20
  generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)
21
-
22
- # Ensure tokenizer has a padding token
23
  if generator_tokenizer.pad_token is None:
24
  generator_tokenizer.pad_token = generator_tokenizer.eos_token
25
 
26
- # Set Hugging Face API Key
27
  HF_API_KEY = os.getenv("HF_API_KEY") # Use environment variable
28
- if not HF_API_KEY:
29
- print("⚠️ Hugging Face API key is missing! Set HF_API_KEY in your environment.")
30
- else:
31
- print("✅ Hugging Face API key detected.")
32
-
33
- # Use Stable Diffusion Model for Image Generation
34
  IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
35
  HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
36
 
@@ -49,22 +42,15 @@ def generate_text(prompt):
49
def generate_image(prompt):
    """Request an image for *prompt* from the Hugging Face inference API.

    Args:
        prompt: Text description of the desired image.

    Returns:
        PIL.Image.Image: the decoded image on HTTP 200, otherwise a solid
        red 300x300 placeholder so the Gradio UI always gets an image.
    """
    try:
        # Bounded timeout: without it a stalled inference endpoint would
        # hang this call (and the whole app) indefinitely.
        response = requests.post(
            IMAGE_GEN_URL,
            headers=HEADERS,
            json={"inputs": prompt},
            timeout=120,
        )
    except requests.RequestException as exc:
        # Network failure: report it and fall back to the placeholder
        # instead of crashing the pipeline.
        print("❌ Error in image generation:", exc)
        return Image.new("RGB", (300, 300), "red")

    print("🔄 Image Generation Request Sent!")
    print(f"Status Code: {response.status_code}")

    if response.status_code == 200:
        print("✅ Image successfully generated!")
        return Image.open(io.BytesIO(response.content))

    print("❌ Error in image generation:", response.text)
    return Image.new("RGB", (300, 300), "red")  # Error placeholder image
62
 
63
def process_input(tamil_text):
    """Run the full pipeline on Tamil input.

    Stages: Tamil→English translation, creative text generation, then
    image generation from the creative text.

    Returns:
        tuple: (english_text, creative_text, image) for the Gradio outputs.
    """
    # Stage 1: translate the Tamil input to English.
    translated = translate_tamil_to_english(tamil_text)
    # Stage 2: expand the translation into creative text.
    expanded = generate_text(translated)
    # Stage 3: render an image from the creative text.
    return translated, expanded, generate_image(expanded)
69
 
70
  # Create Gradio Interface
@@ -81,5 +67,4 @@ interface = gr.Interface(
81
  )
82
 
83
  # Launch the Gradio app
84
- interface.launch()
85
-
 
1
+ # hugging face
2
  import gradio as gr
3
  import requests
4
  from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer
 
19
  generator_model = "EleutherAI/gpt-neo-1.3B"
20
  generator = AutoModelForCausalLM.from_pretrained(generator_model).to(device)
21
  generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)
 
 
22
  if generator_tokenizer.pad_token is None:
23
  generator_tokenizer.pad_token = generator_tokenizer.eos_token
24
 
25
+ # Hugging Face API for Image Generation
26
  HF_API_KEY = os.getenv("HF_API_KEY") # Use environment variable
 
 
 
 
 
 
27
  IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
28
  HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
29
 
 
42
def generate_image(prompt):
    """Request an image for *prompt* from the Hugging Face inference API.

    Args:
        prompt: Text description of the desired image.

    Returns:
        PIL.Image.Image: the decoded image on HTTP 200, otherwise a solid
        red 300x300 placeholder so the Gradio UI always gets an image.
    """
    try:
        # Bounded timeout: without it a stalled inference endpoint would
        # hang this call (and the whole app) indefinitely.
        response = requests.post(
            IMAGE_GEN_URL,
            headers=HEADERS,
            json={"inputs": prompt},
            timeout=120,
        )
    except requests.RequestException:
        # Network failure: fall back to the placeholder instead of
        # crashing the pipeline.
        return Image.new("RGB", (300, 300), "red")

    if response.status_code == 200:
        return Image.open(io.BytesIO(response.content))

    return Image.new("RGB", (300, 300), "red")  # Error placeholder image
48
 
49
def process_input(tamil_text):
    """Run the full pipeline on Tamil input.

    Stages: Tamil→English translation, creative text generation, and
    image generation driven by the translated English text.

    Returns:
        tuple: (english_text, creative_text, image) for the Gradio outputs.
    """
    # Stage 1: translate the Tamil input to English.
    translated = translate_tamil_to_english(tamil_text)
    # Stage 2: expand the translation into creative text.
    expanded = generate_text(translated)
    # Stage 3: render an image from the English translation.
    return translated, expanded, generate_image(translated)
55
 
56
  # Create Gradio Interface
 
67
  )
68
 
69
  # Launch the Gradio app
70
+ interface.launch()