# Hugging Face Space page text captured by the scrape (not Python source):
# Spaces: Sleeping
# Install llama-cpp-python with OpenBLAS support at startup (Hugging Face
# Spaces pattern: the wheel is compiled in the running container).
# NOTE(review): shelling out to pip at runtime is fragile and slow; prefer a
# requirements.txt / Dockerfile build step where the platform allows it.
import os

os.system('CMAKE_ARGS="-DLLAMA_OPENBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python==0.1.62')

import gradio as gr
from llama_cpp import Llama

# Load the quantized GGML model once at module import.
# n_ctx: context window in tokens; n_batch: prompt-evaluation batch size.
# Assumes the .bin file sits in the working directory — TODO confirm path.
llm = Llama(model_path="Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_K_M.bin", n_ctx=2048, n_batch=126)
def generate_text(prompt):
    """Run the loaded LLM on *prompt* and return the completion text.

    Generation is capped at 468 tokens and stops at the first "#";
    low temperature (0.1) and top_p (0.5) keep output near-deterministic.
    Returns the text of the first (only) choice in the completion dict.
    """
    output = llm(prompt, max_tokens=468, temperature=0.1, top_p=0.5, echo=False, stop=["#"])
    return output['choices'][0]['text']
# UI copy and example [prompt, expected answer] pairs for the interface.
description = "Wizard-Vicuna-13B-Uncensored, max_tokens=468, temperature=0.1, top_p=0.5"
examples = [
    ["What is the capital of France? ", "The capital of France is Paris."],
    ["Who wrote the novel 'Pride and Prejudice'?", "The novel 'Pride and Prejudice' was written by Jane Austen."],
    ["What is the square root of 64?", "The square root of 64 is 8."],
]
# Build and launch the web UI. `description` was defined but never passed to
# the Interface — wired in here. NOTE(review): `examples` is left out because
# its [prompt, answer] pairs do not match the single "text" input component;
# confirm the intended format before enabling examples=examples.
gradio_interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Vicuna API",
    description=description,
)
gradio_interface.launch()