# https://discuss.huggingface.co/t/dynamical-flexible-output/18146/6
# https://github.com/gradio-app/gradio/issues/2066
import gradio as gr
# from transformers import AutoModelForCausalLM, AutoTokenizer
import pandas as pd
from datetime import datetime, timedelta, timezone
# import torch
from config import groq_token, groq_model, QUESTION_PROMPT, init_google_sheets_client, default_model_name, user_names, google_sheets_name
# from config import hugging_face_token, replicate_token
# import replicate
import gspread
from groq import Client
import random, string, json, io
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from google.oauth2 import service_account  # Import service_account module

# Initialize Google Sheets client
client = init_google_sheets_client()
sheet = client.open(google_sheets_name)
# sheet = client.open_by_key('1kA37sJps3nhki-s9S7J_mQtNoqoWOLvezV0HobHzQ4s')  # ID of the new test chatbot spreadsheet
stories_sheet = sheet.worksheet("Stories")
system_prompts_sheet = sheet.worksheet("System Prompts")

# Model dictionary (only the Groq models are enabled at the moment)
all_models = {**groq_model}


def randomize_key_order(aux):
    keys = list(aux.keys())
    # Shuffle the list of keys
    random.shuffle(keys)
    # Create a new dictionary with the shuffled keys
    return {key: aux[key] for key in keys}


alphabet = list(string.ascii_uppercase)

# Initialize Groq client
groq_client = Client(api_key=groq_token)


# Load stories from Google Sheets
def load_stories():
    stories_data = stories_sheet.get_all_values()
    stories = [{"title": story[0], "story": story[1]} for story in stories_data if story[0] != "Title"]  # Skip header row
    return stories


# Load system prompts from Google Sheets
def load_system_prompts():
    system_prompts_data = system_prompts_sheet.get_all_values()
    system_prompts = [prompt[0] for prompt in system_prompts_data[1:]]  # Skip header row
    return system_prompts


# Load available stories and system prompts
stories = load_stories()
system_prompts = load_system_prompts()

# Initialize the selected model
selected_model = default_model_name
tokenizer, model = None, None  # Placeholders for a local HF model (unused while Groq is the backend)

# Initialize the data list
data = []

# Chat history
chat_history = []
model_history = []

# Save all_answers to Google Drive
FILE_ID = '1PwEiBxpHo0jRc6T1HixyC99UnP9iawbr'


def save_answers(all_answers):
    # Service-account credentials (replace with your own)
    SCOPES = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
    SERVICE_ACCOUNT_FILE = 'polar-land-440713-c4-bbc8d89804d8.json'

    # Authenticate
    credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    service = build('drive', 'v3', credentials=credentials)

    # Fetch the existing file's metadata
    file = service.files().get(fileId=FILE_ID).execute()

    # Download the content using get_media instead of export_media
    request = service.files().get_media(fileId=FILE_ID)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        status, done = downloader.next_chunk()
        print("Download %d%%." % int(status.progress() * 100))

    # Load the JSON content
    content = fh.getvalue()
    if content:
        existing_data = json.loads(content)
    else:
        existing_data = {}

    # Convert sets to lists before serialization, if any exist
    def convert_sets_to_lists(obj):
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, dict):
            return {k: convert_sets_to_lists(v) for k, v in obj.items()}
        if isinstance(obj, list):
            return [convert_sets_to_lists(item) for item in obj]
        return obj

    existing_data = convert_sets_to_lists(existing_data)

    # Append the new data to the array
    if 'data' in existing_data:
        existing_data['data'].append(all_answers)
    else:
        existing_data['data'] = [all_answers]

    # Serialize the data to JSON
    new_content = json.dumps(existing_data)

    # Create a temporary file to store the JSON data
    with open('temp_data.json', 'w') as temp_file:
        temp_file.write(new_content)

    media = MediaFileUpload('temp_data.json', mimetype='application/json')
    file = service.files().update(fileId=FILE_ID, media_body=media, fields='id').execute()
    print('File updated successfully: %s' % file.get('id'))
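
# Sketch of the JSON document this leaves on Drive (shape inferred from the
# code above and from how all_answers is built in multiple_interact; the
# exact inner fields of each dialog entry are an assumption):
# {
#   "data": [
#     [
#       {"context": [...], "assistant": [...], "selected": "model-or-user_input"},
#       ...
#     ]
#   ]
# }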

# Function to save comment and score
def save_comment_score(score, comment, story_name, user_name, system_prompt, models):
    print("Saving comment and score...")
    print(chat_history)
    print(model_history)
    full_chat_history = ""

    # Create a formatted chat history with roles, and the model taken from model_history
    for message in chat_history:
        print(message['role'])
        if message['role'] == 'user':  # User message
            full_chat_history += f"User: {message['content']}\n"
        if message['role'] == 'assistant':  # Assistant message
            full_chat_history += f"Model: {model_history.pop(0)} Assistant: {message['content']}\n"

    timestamp = datetime.now(timezone.utc) - timedelta(hours=3)  # Adjust to GMT-3
    timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
    model_name = ' '.join(models)

    # Append data to local data storage
    print(full_chat_history)
    data.append([
        timestamp_str,
        user_name,
        model_name,
        system_prompt,
        story_name,
        full_chat_history,
        score,
        comment
    ])

    # Append data to Google Sheets
    try:
        user_sheet = client.open(google_sheets_name).worksheet(user_name)
    except gspread.exceptions.WorksheetNotFound:
        user_sheet = client.open(google_sheets_name).add_worksheet(title=user_name, rows="100", cols="20")
    user_sheet.append_row([timestamp_str, user_name, model_name, system_prompt, story_name, full_chat_history, score, comment])

    # Save all answers to Google Drive as a JSON file
    print(f"all answers...\n{all_answers}")
    save_answers(all_answers)

    # Append data and render the data table
    df = pd.DataFrame(data, columns=["Timestamp", "User Name", "Model Name", "System Prompt", "Story Name", "Chat History", "Score", "Comment"])
    return df[["Chat History", "Score", "Comment"]], gr.update(value="")  # Show only the required columns and clear the comment input box


# Function to handle interaction with the model
def interact_groq(context, model_name):
    chat_completion = groq_client.chat.completions.create(
        messages=context,
        model=model_name,
        temperature=0.1,
        max_tokens=100,
    )
    # print(chat_completion)
    return chat_completion.choices[0].message.content
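
# Illustrative call (the model id below is hypothetical; real ids come from
# the values of groq_model in config):
# reply = interact_groq([{"role": "user", "content": "Hello"}], "some-groq-model-id")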
system_prompt = f""" {system_prompt} Here is the story: --- {story['story']} --- """ combined_message = system_prompt.strip() if combined_message: chat_history = [] # Reset chat history chat_history.append({"role": "system", "content": combined_message}) chat_history.append({"role": "user", "content": QUESTION_PROMPT}) response = interact_groq(chat_history, model_name) resp = {"role": "assistant", "content": response.strip()} return resp, chat_history, story["story"] else: print("Combined message is empty.") else: print("Story title does not match.") #i=[story_dropdown, model_dropdown, system_prompt_dropdown], #o=[chatbot_output, chat_history_json, data_table, selected_story_textbox]) #recibo varios respuestas las muestro nomas, agrego al contexto solo la que se #story_dropdown, model_checkbox, system_prompt_dropdown] def send_multiple_selected_story(title, models, system_prompt): global model_history global chatbot_answser_list global all_answers resp_list = [] print(models) #iterate over words #shuffle_models = randomize_key_order(all_models) random.shuffle(models) print(f"models shuffled: {models}") for index, model in enumerate(models): resp, context, _ = send_selected_story(title, model, system_prompt) chatbot_answser_list[alphabet[index]] = {'response': resp, 'model': model} try: print(resp) resp_list.append(gr.Chatbot(value=[resp], visible=True, type='messages')) except gr.exceptions.Error: print(f"error for en modelo {model}") rest = [model for model in model_list if model not in models] for model in rest: try: resp_list.append(gr.Chatbot(type='messages', visible=False)) except gr.exceptions.Error: print(f"error, else en modelo {model}") try: resp_list.insert(0, gr.Chatbot(value=context, type='messages')) #chat_history ya se hace en send_selected_story except gr.exceptions.Error: print(f"error en main output\n {context}") return resp_list #inputs=[user_input, chatbot_main_output, model_checkbox, chat_radio, assistant_user_input, chatbot_resp[0], chatbot_resp[1], chatbot_resp[2], chatbot_resp[3]],# interaction_count], def remove_metadata(json_array): print(json_array) print(type(json_array)) json_aux = [] for json_obj in json_array: print(f'objeto{json_obj}') json_aux.append({'role':json_obj["role"], 'content':json_obj["content"]}) return json_aux # dont know the correct model beacuse it shuffles each time #selected model it's only the index in radio input def multiple_interact(query, models, selected_model, assistant_user_input): #, interaction_count) #print(f'chat_checkbox: {selected_model}') resp_list = [] #print(model_history) if selected_model == "user_input": user_dialog = [{'response': {'role': 'assistant', 'content': assistant_user_input}, 'model': 'user_input'}] dialog = { "context": remove_metadata(chat_history), "assistant": user_dialog + list(chatbot_answser_list.values()), "selected": "user_input", } chat_history.append({"role": "assistant", "content": assistant_user_input}) chat_history.append({"role": "user", "content": query}) else: dialog = { "context": remove_metadata(chat_history), "assistant": list(chatbot_answser_list.values()), "selected": None, } #chatbot_answser_list #get the previous answer of the selected model for index, model in enumerate(models): if alphabet[index] == selected_model: selected_model_history = chatbot_answser_list[selected_model]['response'] print(f"selected_model_history: {selected_model_history}") chat_history.append(selected_model_history) chat_history.append({"role": "user","content": query.strip()}) #si es la correcta guardarla 
dialog["selected"] = chatbot_answser_list[selected_model]['model'] break #APPE all_answers.append(dialog) #save to csv selected_model_history = {} #reset history #creo que no precisa aux_history = remove_metadata(chat_history) #print(aux_history) #no es models es.... random.shuffle(active_models) for index, model in enumerate(active_models): resp = interact_groq(aux_history, model) resp = {"role": "assistant", "content": resp.strip()} chatbot_answser_list[alphabet[index]] = {'response': resp, 'model': model} try: print(resp) resp_list.append(gr.Chatbot(value=[resp], visible=True, type='messages')) except gr.exceptions.Error: print(f"error for en modelo {model}") rest = [model for model in model_list if model not in active_models] for model in rest: try: resp_list.append(gr.Chatbot(type='messages', visible=False)) except gr.exceptions.Error: print(f"error, else en modelo {model}") resp_list.insert(0, gr.Chatbot(value=aux_history, type='messages')) model_history.append(selected_model) print(model_history) return resp_list # Function to load user guide from a file def load_user_guide(): with open('user_guide.txt', 'r') as file: return file.read() def change_textbox(checkbox): if checkbox == "user_input": return gr.Textbox(placeholder="Type your message here...", label="Assistant input", visible=True) else: return gr.Textbox(value="", visible=False) def change_checkbox(checkbox): print(f'checkbox: {checkbox}') #luego cuando sean variables global active_models active_models = checkbox quant_models = len(checkbox) words = [alphabet[i] for i in range(quant_models)] checkbox = gr.Radio(label="Select Model to respond...", choices=words+["user_input"]) #checkbox = gr.Radio(label="Select Model to respond...", choices=checkbox+["user_input"]) return checkbox def change_story(story_title, ret="gradio"): for story in stories: if story["title"] == story_title: if ret== "gradio": return gr.Textbox(label="Selected Story", lines=10, interactive=False, value=story["story"]) else: #"string" return story["story"] return gr.Textbox(label="Error", lines=10, interactive=False, value="Story title does not match.") chatbot_list = [] model_list = list(all_models.keys()) active_models = [] #chatbot_answer_list['model'] = "respuesta aqui" chatbot_answser_list = {} all_answers = [] #save all answers of all chatbots # Create the chat interface using Gradio Blocks active_models = [] with gr.Blocks() as demo: with gr.Tabs(): with gr.TabItem("Chat"): gr.Markdown("# Demo Chatbot V3") gr.Markdown("## Context") with gr.Group(): model_dropdown = gr.Dropdown(choices=list(all_models.keys()), label="Select Models", value=model_list[0]) user_dropdown = gr.Dropdown(choices=user_names, label="Select User Name") initial_story = stories[0]["title"] if stories else None story_dropdown = gr.Dropdown(choices=[story["title"] for story in stories], label="Select Story", value=initial_story) system_prompt_dropdown = gr.Dropdown(choices=system_prompts, label="Select System Prompt", value=system_prompts[0]) send_story_button = gr.Button("Send Story") gr.Markdown("## Chat") with gr.Group(): selected_story_textbox = gr.Textbox(label="Selected Story", lines=10, interactive=False) chatbot_output = gr.Chatbot(label="Chat History", type='messages') chatbot_input = gr.Textbox(placeholder="Type your message here...", label="User Input") send_message_button = gr.Button("Send") gr.Markdown("## Evaluation") with gr.Group(): score_input = gr.Slider(minimum=0, maximum=5, step=1, label="Score") comment_input = gr.Textbox(placeholder="Add a comment...", 
label="Comment") save_button = gr.Button("Save Score and Comment") data_table = gr.DataFrame(headers=["Chat History", "Score", "Comment"]) with gr.TabItem("User Guide"): gr.Textbox(label="User Guide", value=load_user_guide(), lines=20) with gr.TabItem("Multiple Evaluation"): with gr.Group(): #model_dropdown = gr.Dropdown(choices=list(all_models.keys()), label="Select Model", value=default_model_name) model_checkbox = gr.CheckboxGroup(choices=list(all_models.keys()), label="Select Model", value=None) #value=[default_model_name]) user_dropdown = gr.Dropdown(choices=user_names, label="Select User Name") story_dropdown = gr.Dropdown(choices=[story["title"] for story in stories], label="Select Story", value=initial_story) system_prompt_dropdown = gr.Dropdown(choices=system_prompts, label="Select System Prompt", value=system_prompts[0]) send_multiple_story_button = gr.Button("Send Story") gr.Markdown("## Chat") with gr.Group(): selected_story_textbox = gr.Textbox(label="Selected Story", lines=10, interactive=False, value=change_story(initial_story, "string")) #aqui armar una ventana x cada modelo seleccionado chatbot_list.append(gr.Chatbot(label="Chat History", type='messages')) with gr.Row(): for i, model in enumerate(model_list): label = f"Model {alphabet[i % len(alphabet)]}" aux = gr.Chatbot(label=label, visible=False, type='messages') chatbot_list.append(aux) user_input = gr.Textbox(placeholder="Type your message here...", label="User Input") #chat_radio = gr.Radio(choices=list(model_list)+["user_input"], label="Sent something to continue...", value=[model_list[0]]) chat_radio = gr.Radio(label="Select Model to respond...") #elegir respuesta primero, luego enviar mensaje assistant_user_input = gr.Textbox(interactive=True, show_copy_button=True, visible=False) send_multiple_message_button = gr.Button("Send") gr.Markdown("## Evaluation") with gr.Group(): score_input = gr.Slider(minimum=0, maximum=5, step=1, label="Score") comment_input = gr.Textbox(placeholder="Add a comment...", label="Comment") save_button_multievaluation = gr.Button("Save Score and Comment") data_table = gr.DataFrame(headers=["Chat History", "Score", "Comment"]) interaction_count = gr.Number(value=0, visible=False) selected_model_array = gr.List(value=None, visible=False) #input es las entradas a la funcion #output es las salidas de la funcion? 

    # inputs are what goes into the function; outputs are what the function
    # returns (they can be components created above)
    # send_story_button.click(fn=send_selected_story, inputs=[story_dropdown, model_dropdown, system_prompt_dropdown], outputs=[chatbot_output, chat_history_json, data_table, selected_story_textbox])
    # send_message_button.click(fn=interact, inputs=[chatbot_input, chat_history_json, interaction_count, model_dropdown], outputs=[chatbot_input, chatbot_output, chat_history_json, interaction_count])
    # save_button.click(fn=save_comment_score, inputs=[chatbot_output, score_input, comment_input, story_dropdown, user_dropdown, system_prompt_dropdown], outputs=[data_table, comment_input])

    chat_radio.change(fn=change_textbox, inputs=chat_radio, outputs=assistant_user_input)

    # Choosing models updates the chat radio and sets the active models
    model_checkbox.input(fn=change_checkbox, inputs=model_checkbox, outputs=chat_radio)

    story_dropdown.input(fn=change_story, inputs=[story_dropdown], outputs=selected_story_textbox)

    send_multiple_story_button.click(
        fn=send_multiple_selected_story,
        inputs=[story_dropdown, model_checkbox, system_prompt_dropdown],
        outputs=chatbot_list,
    )

    # TODO: change this so the models answer with only their responses instead
    # of the whole history; I need each model's previous history and whichever
    # model was selected. Here the request is sent, then the updates are returned.
    send_multiple_message_button.click(
        fn=multiple_interact,
        inputs=[user_input, model_checkbox, chat_radio, assistant_user_input],  # interaction_count],
        outputs=chatbot_list,
    )

    # Maybe I need to store a variable with the checkbox values
    save_button_multievaluation.click(
        fn=save_comment_score,
        inputs=[score_input, comment_input, story_dropdown, user_dropdown, system_prompt_dropdown, model_checkbox],
        outputs=[data_table, comment_input])

demo.launch()
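
# Optional: expose a temporary public URL instead of serving on localhost only.
# demo.launch(share=True)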