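# Gradio app for a Hugging Face Space: a content-moderation assistant backed by the
# GGUF build of Mistral-7B-Instruct, run on CPU through ctransformers.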
import gradio as gr
from ctransformers import AutoModelForCausalLM

# Load the 5-bit (Q5_K_S) GGUF build of Mistral-7B-Instruct-v0.1 on CPU (gpu_layers=0);
# ctransformers downloads the file from the Hub itself, so hf_hub_download is not needed.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
    model_file="mistral-7b-instruct-v0.1.Q5_K_S.gguf",
    model_type="mistral",
    gpu_layers=0,
)
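
# System-style instructions prepended to every request: the model is asked to screen
# the submitted content for moderation rather than answer it directly.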
basePrompt = """#YOUR ROLE: You are a helpful, respectful, and honest online forum moderator. You are designed to detect and moderate online content. | |
You have to ensure that any online content shared with you should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. | |
Please ensure that your answers are socially unbiased and positive in nature. | |
If any online content, shared with you, does not make any sense or is not factually coherent, convey the same. | |
If you don't know whether the online content shared with you should be moderated or not, then please convey the same, instead of deciding whether to moderate or not. | |
#Below is the online content which has to be screened for moderation:""" | |

def generate_response(prompt):
    # Wrap the moderation instructions and the user's content in Mistral's [INST] format.
    prompt = f"<s>[INST]\n{basePrompt}\n\n{prompt}\n[/INST]"
    return model(prompt)
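
# Quick local sanity check (hypothetical example input):
#   print(generate_response("Please review this comment: 'Great write-up, thanks for sharing!'"))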
title = "Mistral-7B-Instruct-GGUF" | |
description = "This space is an attempt to run the GGUF 4 bit quantized version of 'Mistral-7B-Instruct'." | |

UI = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="Prompt", placeholder="Ask your queries here..."),
    outputs=gr.Textbox(label="Response"),
    title=title,
    description=description,
)
UI.launch() | |
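
# launch() starts the web UI; the Space runs this file automatically, and locally it can
# be started with `python app.py`.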