nachoremer committed on

Commit 25d5fbb · 1 Parent(s): 3929c07

multichat json v1

Files changed (4)
  1. .gitignore +2 -0
  2. app.py +116 -104
  3. polar-land-440713-c4-bbc8d89804d8.json +13 -0
  4. requirements.txt +2 -1
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv/
+ __pycache__/
app.py CHANGED
@@ -1,27 +1,26 @@
  #https://discuss.huggingface.co/t/dynamical-flexible-output/18146/6
  #https://github.com/gradio-app/gradio/issues/2066
- import os
  import gradio as gr
  #from transformers import AutoModelForCausalLM, AutoTokenizer
  import pandas as pd
  from datetime import datetime, timedelta, timezone
  #import torch
- from config import groq_token, groq_model, QUESTION_PROMPT, init_google_sheets_client, huggingface_tokenizer, replicate_model,groq_model, default_model_name, user_names, google_sheets_name, MAX_INTERACTIONS

  #from config import hugging_face_token, replicate_token
  #import replicate
  import gspread
  from groq import Client
- import random, string
-
- from pydrive.auth import GoogleAuth
- from pydrive.drive import GoogleDrive
- import json


  # Initialize Google Sheets client
  client = init_google_sheets_client()
  sheet = client.open(google_sheets_name)
  stories_sheet = sheet.worksheet("Stories")
  system_prompts_sheet = sheet.worksheet("System Prompts")
@@ -72,104 +71,119 @@ data = []
  chat_history = []
  model_history = []


- from pydrive.auth import GoogleAuth
- from pydrive.drive import GoogleDrive
- import json
-
- # Authentication and creation of the Google Drive client
- gauth = GoogleAuth()
- gauth.LocalWebserverAuth() # This will open a browser window to authenticate
- drive = GoogleDrive(gauth)
-
  def save_comment_score(score, comment, story_name, user_name, system_prompt, models):
  print("Saving comment and score...")
  print(chat_history)
  print(model_history)
  full_chat_history = ""

- # Save all_answers to Google Drive
- file_name = 'all_answers.json'
-
- # Check if the file already exists in Google Drive
- file_list = drive.ListFile({'q': f"title='{file_name}' and trashed=false"}).GetList()
- if file_list:
- # File exists, download it
- gfile = file_list[0]
- gfile.GetContentFile(file_name)
- with open(file_name, 'r') as json_file:
- existing_data = json.load(json_file)
- existing_data.append(all_answers)
- else:
- # File does not exist, create new data
- existing_data = [all_answers]
-
- # Save updated data to the file
- with open(file_name, 'w') as json_file:
- json.dump(existing_data, json_file)
-
- # Upload the file to Google Drive
- gfile = drive.CreateFile({'title': file_name})
- gfile.SetContentFile(file_name)
- gfile.Upload()
- print(f"File {file_name} uploaded to Google Drive.")
-
- return None
-
-
-
-
-
-
-
-
-
- # Function to save comment and score
- #def save_comment_score(score, comment, story_name, user_name, system_prompt, models):
- # print("Saving comment and score...")
- # print(chat_history)
- # print(model_history)
- # full_chat_history = ""
- #
- # # Create formatted chat history with roles
- # #and model in model_history
- # for message in chat_history:
- # print(message['role'])
- # if message['role'] == 'user': # User message
- # full_chat_history += f"User: {message['content']}\n"
- # if message['role'] == 'assistant': # Assistant message
- # full_chat_history += f"Model:{model_history.pop(0)} Assistant: {message['content']}\n"
- #
- # timestamp = datetime.now(timezone.utc) - timedelta(hours=3) # Adjust to GMT-3
- # timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
- # model_name = (' ').join(models)
- # # Append data to local data storage
- # print(full_chat_history)
- # data.append([
- # timestamp_str,
- # user_name,
- # model_name,
- # system_prompt,
- # story_name,
- # full_chat_history,
- # score,
- # comment
- # ])
- #
- # # Append data to Google Sheets
- # try:
- # user_sheet = client.open(google_sheets_name).worksheet(user_name)
- # except gspread.exceptions.WorksheetNotFound:
- # user_sheet = client.open(google_sheets_name).add_worksheet(title=user_name, rows="100", cols="20")
- #
- # user_sheet.append_row([timestamp_str, user_name, model_name, system_prompt, story_name, full_chat_history, score, comment])
- #
- # df = pd.DataFrame(data, columns=["Timestamp", "User Name", "Model Name", "System Prompt", "Story Name", "Chat History", "Score", "Comment"])
- # return df[["Chat History", "Score", "Comment"]], gr.update(value="") # Show only the required columns and clear the comment input box

  # Function to handle interaction with model
@@ -272,26 +286,24 @@ def remove_metadata(json_array):
  # don't know the correct model because it shuffles each time
  #selected model is only the index in the radio input
  def multiple_interact(query, models, selected_model, assistant_user_input): #, interaction_count)
- print(f'chat_checkbox: {selected_model}')
  resp_list = []
- print(model_history)
- #removed history, it is now a global variable

-
  if selected_model == "user_input":
- chat_history.append({"role": "assistant", "content": assistant_user_input})
- chat_history.append({"role": "user", "content": query})
-
  dialog = {
  "context": remove_metadata(chat_history),
- "assistant": assistant_user_input + chatbot_answser_list.values(),
  "selected": "user_input",
  }

  else:
  dialog = {
  "context": remove_metadata(chat_history),
- "assistant": chatbot_answser_list.values(),
  "selected": None,
  }
@@ -384,7 +396,7 @@ model_list = list(all_models.keys())
  active_models = []
  #chatbot_answer_list['model'] = "answer here"
  chatbot_answser_list = {}
- all_answers = {} #save all answers of all chatbots
  # Create the chat interface using Gradio Blocks
  active_models = []
  with gr.Blocks() as demo:
 
  #https://discuss.huggingface.co/t/dynamical-flexible-output/18146/6
  #https://github.com/gradio-app/gradio/issues/2066
  import gradio as gr
  #from transformers import AutoModelForCausalLM, AutoTokenizer
  import pandas as pd
  from datetime import datetime, timedelta, timezone
  #import torch
+ from config import groq_token, groq_model, QUESTION_PROMPT, init_google_sheets_client, groq_model, default_model_name, user_names, google_sheets_name

  #from config import hugging_face_token, replicate_token
  #import replicate
  import gspread
  from groq import Client
+ import random, string, json, io
+ from googleapiclient.discovery import build
+ from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
+ from google.oauth2 import service_account # Import service_account module


  # Initialize Google Sheets client
  client = init_google_sheets_client()
  sheet = client.open(google_sheets_name)
+ #sheet = client.open_by_key('1kA37sJps3nhki-s9S7J_mQtNoqoWOLvezV0HobHzQ4s') ID of the new chatbot test spreadsheet
  stories_sheet = sheet.worksheet("Stories")
  system_prompts_sheet = sheet.worksheet("System Prompts")
  chat_history = []
  model_history = []

+ # Save all_answers to Google Drive
+ FILE_ID = '1PwEiBxpHo0jRc6T1HixyC99UnP9iawbr'
+
+
+ def save_answers(all_answers):
+ # Service account credentials (replace with your own credentials)
+ SCOPES = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
+ SERVICE_ACCOUNT_FILE = 'polar-land-440713-c4-bbc8d89804d8.json'
+ # Authentication
+ credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
+ service = build('drive', 'v3', credentials=credentials)
+
+ # Get the existing file
+ file = service.files().get(fileId=FILE_ID).execute()
+
+ # Download the content using get_media instead of export_media
+ request = service.files().get_media(fileId=FILE_ID)
+ fh = io.BytesIO()
+ downloader = MediaIoBaseDownload(fh, request)
+ done = False
+ while done is False:
+ status, done = downloader.next_chunk()
+ print("Download %d%%." % int(status.progress() * 100))
+
+ # Load the JSON content
+ content = fh.getvalue()
+ if content:
+ existing_data = json.loads(content)
+ else:
+ existing_data = {}
+
+ # Convert sets to lists before serialization if they exist
+ def convert_sets_to_lists(obj):
+ if isinstance(obj, set):
+ return list(obj)
+ if isinstance(obj, dict):
+ return {k: convert_sets_to_lists(v) for k, v in obj.items()}
+ if isinstance(obj, list):
+ return [convert_sets_to_lists(item) for item in obj]
+ return obj
+
+ existing_data = convert_sets_to_lists(existing_data)
+
+ # Append the new data to the array
+ if 'data' in existing_data:
+ existing_data['data'].append(all_answers)
+ else:
+ existing_data['data'] = [all_answers]
+ # Convert the data to JSON format
+ new_content = json.dumps(existing_data)

+ # Create a temporary file to store the JSON data
+ with open('temp_data.json', 'w') as temp_file:
+ temp_file.write(new_content)

+ media = MediaFileUpload('temp_data.json', mimetype='application/json')
+ file = service.files().update(fileId=FILE_ID,
+ media_body=media,
+ fields='id').execute()
+ print('File updated successfully: %s' % file.get('id'))



+ #Function to save comment and score
  def save_comment_score(score, comment, story_name, user_name, system_prompt, models):
  print("Saving comment and score...")
  print(chat_history)
  print(model_history)
  full_chat_history = ""

+ # Create formatted chat history with roles
+ #and model in model_history
+ for message in chat_history:
+ print(message['role'])
+ if message['role'] == 'user': # User message
+ full_chat_history += f"User: {message['content']}\n"
+ if message['role'] == 'assistant': # Assistant message
+ full_chat_history += f"Model:{model_history.pop(0)} Assistant: {message['content']}\n"
+
+ timestamp = datetime.now(timezone.utc) - timedelta(hours=3) # Adjust to GMT-3
+ timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
+ model_name = (' ').join(models)
+ # Append data to local data storage
+ print(full_chat_history)
+ data.append([
+ timestamp_str,
+ user_name,
+ model_name,
+ system_prompt,
+ story_name,
+ full_chat_history,
+ score,
+ comment
+ ])
+
+ # Append data to Google Sheets
+ try:
+ user_sheet = client.open(google_sheets_name).worksheet(user_name)
+ except gspread.exceptions.WorksheetNotFound:
+ user_sheet = client.open(google_sheets_name).add_worksheet(title=user_name, rows="100", cols="20")
+
+ user_sheet.append_row([timestamp_str, user_name, model_name, system_prompt, story_name, full_chat_history, score, comment])
+
+
+ # Save all answers to Google Drive as a JSON file
+ print(f"all answers...\n{all_answers}")
+ save_answers(all_answers)
+
+ #Append data and render the data table
+ df = pd.DataFrame(data, columns=["Timestamp", "User Name", "Model Name", "System Prompt", "Story Name", "Chat History", "Score", "Comment"])
+ return df[["Chat History", "Score", "Comment"]], gr.update(value="") # Show only the required columns and clear the comment input box
+
+

  # Function to handle interaction with model
 
  # don't know the correct model because it shuffles each time
  #selected model is only the index in the radio input
  def multiple_interact(query, models, selected_model, assistant_user_input): #, interaction_count)
+ #print(f'chat_checkbox: {selected_model}')
  resp_list = []
+ #print(model_history)


  if selected_model == "user_input":
+ user_dialog = [{'response': {'role': 'assistant', 'content': assistant_user_input}, 'model': 'user_input'}]

  dialog = {
  "context": remove_metadata(chat_history),
+ "assistant": user_dialog + list(chatbot_answser_list.values()),
  "selected": "user_input",
  }
+ chat_history.append({"role": "assistant", "content": assistant_user_input})
+ chat_history.append({"role": "user", "content": query})

  else:
  dialog = {
  "context": remove_metadata(chat_history),
+ "assistant": list(chatbot_answser_list.values()),
  "selected": None,
  }
 
  active_models = []
  #chatbot_answer_list['model'] = "answer here"
  chatbot_answser_list = {}
+ all_answers = [] #save all answers of all chatbots
  # Create the chat interface using Gradio Blocks
  active_models = []
  with gr.Blocks() as demo:
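For reference, a minimal standalone sketch of the Drive round trip that the new save_answers helper performs (download the JSON file, append a record under the 'data' key, re-upload). It assumes the service-account file and FILE_ID added in this commit are valid and that the target Drive file holds JSON; the appended payload and variable names are illustrative only, not the app's exact code path.

# Hedged sketch only: mirrors the save_answers flow above with an illustrative payload.
import io, json
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from google.oauth2 import service_account

SERVICE_ACCOUNT_FILE = 'polar-land-440713-c4-bbc8d89804d8.json'  # key file added in this commit
FILE_ID = '1PwEiBxpHo0jRc6T1HixyC99UnP9iawbr'                     # Drive file id used by save_answers
SCOPES = ["https://www.googleapis.com/auth/drive"]

creds = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)
service = build('drive', 'v3', credentials=creds)

# Download the current JSON content of the Drive file
request = service.files().get_media(fileId=FILE_ID)
buf = io.BytesIO()
downloader = MediaIoBaseDownload(buf, request)
done = False
while not done:
    _, done = downloader.next_chunk()
existing = json.loads(buf.getvalue() or b'{}')

# Append an illustrative record and write the result to a temp file
existing.setdefault('data', []).append({"model": "example-model", "content": "sample answer"})
with open('temp_data.json', 'w') as f:
    json.dump(existing, f)

# Upload the updated JSON back to the same Drive file
media = MediaFileUpload('temp_data.json', mimetype='application/json')
service.files().update(fileId=FILE_ID, media_body=media, fields='id').execute()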
polar-land-440713-c4-bbc8d89804d8.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "type": "service_account",
+ "project_id": "polar-land-440713-c4",
+ "private_key_id": "bbc8d89804d81d789130b20ef8a78fc48e1adecb",
+ "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDCZpk9tkqLc8jP\nWMHO7t8RwmAeDBMhVoRtRfjsuOxcG2mNzU4jCF1pqX6Ot1d46xLtKepv++aMDaFb\nlnOJxFYUIgcq2pXfGSVE23YPwLcsxM/zHszxsGZbTQ7XhlhPzUp8Q/hJAkWemn46\nJRF5KvqZfPCWoKKts7OIHSnio4UellU+HBcbpdH2o7DttYCiDk0/4OSQmBnsr2xy\ngV9NgNXQTLwuT4CfWxLkNaHbWan3jCumlvCAv65s1Sc16iIFB9m1VTkxbA4IG04/\nMaQYTsw6UQ3Y8k+Rt1kpCEJPHk4J8aG5BTw9mhXJsr+KTTYT1/+4/Vupz/fn8mG8\nWyY9LhSBAgMBAAECggEAAaFVy8+ayJhtY2xpiahKp23jcPUQdwEwUJtGMbQA9Wpx\nxD49F2xInkF33c9Z9NoMeVUr5wmsKVoBk6X2F5PAK2CNV1W9woEbOnyC/Qb/HOlp\n8K5o27urNrz1cNIVV6v6Idh5vUup1PaTiZN0gKPfNfN3MAguUmRqpGum2tnyB0Df\nN5JyPkoUSmMZ5D4ukd5vyFD9lVqA8DC//S3M3YhHA4Ri62DVyQfIPtbNDjKvDdJV\nhqfXoQuWhAi4BhsCgUbhjGLiM+nC/GvdCTMMmL8JB1rdqHm4WzzDrooOzvfNkDq2\nB3/SiHzzV117+2wrZpR/E66iUTisHn1ykNgiGbOl7QKBgQD5/V/gP65NHmpOv+cK\nbYnWTPkOXbulJp8pfL8V1SKTNf18gdEp54YNG2XMOXN3tyyuFU4b7HV+D1a/p60S\nT1hh0bv09J12wisnmlWkbPx1s206NZZQjU2Q8Dmph2fe3eaQvKiN8xImhkqKeHa4\n6HYe0qKQHMWGCiCZWIh73Tj5tQKBgQDHExZvHLgX0pGGMkZqsQpQUwGod4b2BcCu\nIPutWO/1c7ueCCOV29yoVWLsayv9JZo24tLFVm4u1VR0cnfvPcsISAMKfpR9Vexd\n4/m2FtyX8kTTNRjrqs4zhtWnopcYFv4szCR1UWjEJT/okL0RfZFRWRP0Lq5KCr0l\nEO7VdhV/HQKBgFW+6bbSQOfkydPvEMFL5x5hUn/x+J3iDG+/gQ9tBqn1t9eP831r\nE1jFj/onropegu3HEv6S8Y3mlvuv+LYyDv+mX9DaYnRMj17LGhsG4uVAQPPZU5CX\nlwZiz3RtpFwWQrJcc3gU6qHaKBEs1pP/8BwMZYQGyl16dcoDSvUpYRSNAoGBAKiO\nTtZxmKYDElhvxROdJ0Bvp92+prU0lbpaDv7taO+IuHjMJ2VBrYmSEppe+/XxmquS\nTifsUIqy8jlOmqIkzf4LCxgdqRieYe31k39LyCeXaBL0yE7yZnlWVx0DG6+3+Cpy\nBwSWk4BQkdYWhx+CFEKui6iaxdPHV/NEbRSsUPM5AoGBAKV8VBeOKygkTMyVkvfr\nN4xNZlLQaSCi0b2crUdhURoHaAMuxmrURIYbWiK9LrNaHugRz78KzkgaOAfHPq45\nnM/6RjDpkkA7ouX620+xSL0cFoVaXuYVLdLkxl7SyDaVPYFqqYsf36MOzsPekYzz\nKN8C9b4Vic/jM76HbFSJ/TK4\n-----END PRIVATE KEY-----\n",
+ "client_email": "[email protected]",
+ "client_id": "108705807236254785411",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://oauth2.googleapis.com/token",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/robot-481%40polar-land-440713-c4.iam.gserviceaccount.com",
+ "universe_domain": "googleapis.com"
+ }
requirements.txt CHANGED
@@ -8,4 +8,5 @@ oauth2client
  #accelerate
  #bitsandbytes
  replicate
- groq

  #accelerate
  #bitsandbytes
  replicate
+ groq
+ gradio