Spaces:
Runtime error
File size: 1,170 Bytes
import gradio as gr
from transformers import pipeline
# Load the large language model (LLM)
llm_pipeline = None
try:
    # Load the model and processor directly, then wrap them in a
    # text-generation pipeline so generate_text() below can use them
    from transformers import AutoProcessor, AutoModelForPreTraining

    processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")
    model = AutoModelForPreTraining.from_pretrained("meta-llama/Llama-3.2-11B-Vision-Instruct")  # You can use a different model here
    llm_pipeline = pipeline("text-generation", model=model, tokenizer=processor.tokenizer)
    print("Model loaded successfully!")
except Exception as e:
    print(f"Error loading model: {e}")
    llm_pipeline = None
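# Note: the meta-llama checkpoints are gated on the Hugging Face Hub, so the
# download only succeeds with an authenticated access token (for a Space,
# e.g. an HF_TOKEN secret configured in the Space settings).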
# Define the function to generate text based on input prompt
def generate_text(prompt):
    if llm_pipeline is None:
        return "Error: Model not loaded."
    result = llm_pipeline(prompt, max_length=100, num_return_sequences=1)
    return result[0]['generated_text']
# Create the Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=7, label="Input Prompt"),
    outputs="text",
    title="Large Language Model Text Generation",
    description="Enter a prompt to generate text using a large language model."
)
print("Launching the Gradio interface...")
# Launch the interface
interface.launch()
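
Once this app is running (locally or as a Space), the interface can also be queried programmatically. The snippet below is a minimal sketch using gradio_client against a local launch on the default port; the URL and the example prompt are assumptions, not part of the original Space.

from gradio_client import Client

# Sketch: call the running Gradio app's predict endpoint programmatically.
# Assumes the app above is serving locally on Gradio's default port.
client = Client("http://127.0.0.1:7860/")
result = client.predict(
    "Write one sentence about large language models.",  # Input Prompt textbox
    api_name="/predict",
)
print(result)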