from transformers import pipeline
import gradio as gr

# Load the text-generation model once at startup
model = pipeline("text-generation", model="zenai-org/SmolLM-prompt-generation")
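
# The pipeline bundles model and tokenizer loading; a roughly equivalent
# explicit setup (a sketch, not needed here) would be:
#   tokenizer = AutoTokenizer.from_pretrained("zenai-org/SmolLM-prompt-generation")
#   lm = AutoModelForCausalLM.from_pretrained("zenai-org/SmolLM-prompt-generation")
#   model = pipeline("text-generation", model=lm, tokenizer=tokenizer)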
# Define a prediction function that takes in a text prompt and returns the text completion
def predict(prompt):
    out = model(
        prompt,
        max_length=77,       # max length (in tokens) of the generated sequence
        min_length=10,       # minimum length of the generated sequence
        do_sample=True,      # enable sampling instead of greedy decoding
        top_k=50,            # top-k sampling: keep only the 50 most likely tokens
        top_p=0.95,          # top-p (nucleus) sampling
        temperature=0.7,     # control the creativity of the output
        eos_token_id=0,      # end-of-sequence token id
        pad_token_id=model.tokenizer.eos_token_id,  # pad with EOS to avoid a missing-pad-token warning
    )
    return out[0]['generated_text']
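
# Example usage (the prompt here is just an illustration; output text will
# vary between calls because do_sample=True):
#   completion = predict("a portrait of")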
# Now create the interface
gr.Interface(fn=predict, inputs="text", outputs="text", css=".footer{display:none !important}").launch(share=True)
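
# Note: share=True creates a temporary public gradio.live link when the app
# is run locally; on Hugging Face Spaces the app is already publicly served,
# so the flag has no effect there.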