import subprocess
from threading import Thread

import torch
import spaces
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer

# Install FlashAttention at startup (prebuilt wheel; the CUDA build step is skipped)
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

MODEL_ID = "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B"
CHAT_TEMPLATE = "Auto"  # Currently unused; the prompt is assembled manually in predict()
MODEL_NAME = MODEL_ID.split("/")[-1]
CONTEXT_LENGTH = 16000

# Set the interface variables directly
COLOR = "black"  # Default interface color
EMOJI = "🤖"  # Default emoji for the model
DESCRIPTION = f"This is the 4-bit quantized {MODEL_NAME} model (BitsAndBytes), designed for testing reasoning on general AI tasks."  # Default description

# LaTeX delimiters for rendering math in chat output
# (can be passed to the chatbot via gr.Chatbot(latex_delimiters=latex_delimiters_set))
latex_delimiters_set = [
    {"left": "\\(", "right": "\\)", "display": False},
    {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
    {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
    {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
    {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
    {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
    {"left": "\\[", "right": "\\]", "display": True},
]


@spaces.GPU()
def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
    # Build a plain-text prompt from the system prompt and chat history
    # (note: this does not apply the model's own chat template)
    instruction = system_prompt + "\n\n"
    for user, assistant in history:
        instruction += f"role:user, content: {user}\nrole:assistant, content: {assistant}\n"
    instruction += f"role:user, content: {message}\nassistant:"
    print(instruction)  # Log the final prompt for debugging

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    enc = tokenizer(instruction, return_tensors="pt", padding=True, truncation=True)
    input_ids, attention_mask = enc.input_ids, enc.attention_mask
    # Keep only the most recent CONTEXT_LENGTH tokens
    if input_ids.shape[1] > CONTEXT_LENGTH:
        input_ids = input_ids[:, -CONTEXT_LENGTH:]
        attention_mask = attention_mask[:, -CONTEXT_LENGTH:]

    generate_kwargs = dict(
        input_ids=input_ids.to(device),
        attention_mask=attention_mask.to(device),
        streamer=streamer,
        do_sample=True,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        top_p=top_p,
    )
    # Run generation on a background thread so tokens can be streamed as they arrive.
    # generate() stops at the EOS token on its own and the streamer is exhausted when
    # generation finishes, so no manual stop-token check is needed here (the original
    # compared decoded strings against tokenizer.eos_token_id, which never matched).
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    outputs = []
    for new_token in streamer:
        outputs.append(new_token)
        yield "".join(outputs)


# Load tokenizer and the 4-bit quantized model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    quantization_config=quantization_config,
    attn_implementation="flash_attention_2",
)

# Create Gradio interface
gr.ChatInterface(
    predict,
    title=EMOJI + " " + MODEL_NAME,
    description=DESCRIPTION,
    additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False),
    additional_inputs=[
        gr.Textbox(
            "You are a helpful assistant. First identify the user's language, then reply in that language.",
            label="System prompt",
        ),
        gr.Slider(0, 1, 0.3, label="Temperature"),
        gr.Slider(128, 4096, 1024, label="Max new tokens"),
        gr.Slider(1, 80, 40, label="Top K sampling"),
        gr.Slider(0, 2, 1.1, label="Repetition penalty"),
        gr.Slider(0, 1, 0.95, label="Top P sampling"),
    ],
    # theme=gr.themes.Soft(primary_hue=COLOR),
).queue().launch()