date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | kittyjosh111/gptChat | discordBot~beta.py | import openai
import os
import json
import discord
from discord.ext import commands
import shutil
openai.api_key = ""
discord_bot_token = ''
triggers = ["hey gpt"] #This is how to activate the bot. The bot will respond when it detects this word in any message sent on a server.
#This is a simple script to converse with OpenAI's GPT models. It tries to keep persistence between chats by creating a file to store logs of the past conversations, here known as neuralcloud_discord.ncb.
#Model responses are also written to a log.log for further reference.
#This script uses the chat model, or currently the gpt-3.5 model that is similar to ChatGPT.
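#Illustrative example of what neuralcloud_discord.ncb holds after a couple of exchanges (the assistant
#reply shown here is hypothetical): it is the same list of role/content dicts the chat API expects.
# [{"role": "system", "content": "The following is a conversation with an AI assistant..."},
#  {"role": "user", "content": "Hello."},
#  {"role": "assistant", "content": "Hi! How can I help you today?"}]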
#################
### Variables ###
#model is the used OpenAI model. Check their website for different model names.
#https://platform.openai.com/docs/models/overview
model="gpt-3.5=turbo"
#the prompt is what the model will read for to create the response.
#Do not include the initial human prompt, just the description of what the model's pesonality should be like.
base_prompt="""The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly."""
#Limit for how many pieces of dialogue the model should remember before summarizing the previous conversations back into the prompt.
#This is used as a way to prolong how much the model can talk to us before hitting the overall token limit.
limit_length=50
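#Sketch of the mechanism for reference: once len(memory) reaches limit_length, cleaner() (defined below)
#summarizes log_discord.log into one line and resets memory to a single system message of the form
#  base_prompt + "\n" + "A summary of their previous conversation is as follows: " + <summary>
#so the next request starts from a short prompt instead of the full message history.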
#################
#################
#First, a function to save the memory variable to the ncb. I will use this a lot, so it works best as a function.
def save_ncb():
with open('neuralcloud_discord.ncb', 'w') as save:
save.write(json.dumps(memory))
#Initialize my custom memory file. Basically, a text file to log everything we've written and then reuse it as the prompt for future prompts.
#First we check if there already exists a neural cloud file. If not, then we create the ncb file and write the prompt to it.
#It's like waking up their neural cloud for the first time. Otherwise, it's just restoring their neural cloud.
memory=[] #unlike the gpt3 script, we use a variable to store memory here.
ncb = './neuralcloud_discord.ncb'
check = os.path.isfile(ncb)
if check:
with open('neuralcloud_discord.ncb') as read:
output = read.read()
formatted_list = json.loads(output)
memory = formatted_list #These steps allow the model to have past dialogues loaded as a python list
else:
memory.append({"role": "system", "content": f"{base_prompt}"}, ) #creating the file with the system prompt
memory.append({"role": "user", "content": "Hello."}, )
save_ncb() #So the model's first words are a greeting to the user.
#################
### Functions ###
#Function for the api request so that I don't have to copy paste this over and over again.
def api_request(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt
)
api_request.response = response['choices'][0]['message']['content'].strip()
memory.append({"role": "assistant", "content": f"{api_request.response}"}, ) #write to the memory variable
save_ncb() #save memory to ncb after generation of response
log = open("log_discord.log", "a")
log.write("\n" + "Assistant: " + api_request.response) #Write to log
log.close()
#Function to determine how to compress the ncb
def cleaner():
global memory
if len(memory) >= limit_length:
# GOALS:
# Make the summaries additive rather than replacing them altogether. Consider modifying the log file by adding in the previous summary as well.
# IMPLEMENTED as putting in the new_prompt into the log before the user / assistant dialogue
# CHECK TO SEE IF IT WORKS
##Summarizer
print("Cleaning up neural cloud. Please wait...") #print so that user can see what is going on
with open('log_discord.log') as read: #the log will show both user and assistant dialog. This makes it perfect for the summarizer.
output = read.read()
query="Only summarize the following conversation into one line from the perspective of the assistant. Do not explain." + '"' + output + '"' #this is the prompt for summary sent to the api
summary=[] #again, the api requires a list rather than text
summary.append({"role": "system", "content": f"{query}"}, )
summarize = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=summary
)
summarize.response = summarize['choices'][0]['message']['content'].strip()
new_prompt=base_prompt + "\n" + "A summary of their previous conversation is as follows: " + summarize.response #now we need to replace the old memory variable with the new prompt
memory=[] #blank out the memory variable
memory.append({"role": "system", "content": f"{new_prompt}"}, ) #add in the new prompt (base_prompt + summary) to the memory variable
## File manipulation. First we remove both backup files, should they exist.
if os.path.exists("neuralcloud_discord.ncb.bk"):
os.remove("neuralcloud_discord.ncb.bk")
else:
pass
if os.path.exists("log_discord.log.bk"):
os.remove("log_discord.log.bk")
else:
pass
original_ncb = r'neuralcloud_discord.ncb'
backup_ncb = r'neuralcloud_discord.ncb.bk' #makes the ncb backup
shutil.copyfile(original_ncb, backup_ncb)
original_log = r'log_discord.log'
backup_log = r'log_discord.log.bk' #makes the log backup
shutil.copyfile(original_log, backup_log)
os.remove("neuralcloud_discord.ncb")
os.remove("log_discord.log") #remove both original files
save_ncb() #make a new ncb file, with the new_prompt as the system content
log = open("log_discord.log", "a")
log.write("A summary of the previous conversation is as follows: " + summzarize.response) #Write to log the summary part as well, just so that we don't lose bits of the memory from pre-clean.
log.close()
else:
pass
#################################################################################
# This is the discord bot portion.
intents = discord.Intents().all()
client = commands.Bot(command_prefix=',', intents=intents)
@client.event
async def on_ready():
print('online')
print(memory)
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.author.bot:
return
for i in range(len(triggers)):
if triggers[i].lower() in message.content.lower():
cleaner()
memory.append({"role": "user", "content": message.content}, )
save_ncb()
api_request(memory)
print(api_request.response)
await message.channel.send(api_request.response)
client.run(discord_bot_token)
| [
"\n",
"The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.",
"base_prompt + \"\\n\" + \"A summary of their previous conversation is as follows: \" + summarize.response",
"Hello.",
"A summary of their previous conversation is as follows: ",
"Only summarize the following conversation into one line from the perspective of the assistant. Do not explain.\"PLACEHOLDER\""
] |
2024-01-10 | kittyjosh111/gptChat | discordBot~discord_chatGPT.py | import openai
import os
import json
import discord
from discord.ext import commands
openai.api_key = ""
discord_bot_token = ''
triggers = ["hey gpt"] #This is how to activate the bot. The bot will respond when it detects this word in any message sent on a server.
#This is a simple script to converse with OpenAI's GPT models. It tries to keep persistence between chats by creating a file to store logs of the past conversations, here known as neuralcloudv2.ncb.
#Model responses are also written to a log.log for further reference.
#This script uses the chat model, or currently the gpt-3.5 model that is similar to ChatGPT.
#This script also will take output and inputs by interfacing with discord. Fun!
#################
### Variables ###
#counter variable that determines whether to begin with the model or the user
counter = 0
#model is the used OpenAI model. Check their website for different model names.
#https://platform.openai.com/docs/models/overview
model="gpt-3.5=turbo"
#the prompt is what the model will read for to create the response.
#Do not include the initial human prompt, just the description of what the model's pesonality should be like.
base_prompt="""The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly."""
#################
#################
#First, a function to save the memory variable to the ncb. I will use this a lot, so it works best as a function.
def save_ncb():
with open('neuralcloud_discord.ncb', 'w') as save:
save.write(json.dumps(memory))
#Initialize my custom memory file. Basically, a text file to log everything we've written and then reuse it as the prompt for future prompts.
#First we check if there already exists a neural cloud file. If not, then we create the ncb file and write the prompt to it.
#It's like waking up their neural cloud for the first time. Otherwise, it's just restoring their neural cloud.
memory=[] #unlike the gpt3 script, we use a variable to store memory here.
ncb = './neuralcloud_discord.ncb'
check = os.path.isfile(ncb)
if check:
with open('neuralcloud_discord.ncb') as read:
output = read.read()
formatted_list = json.loads(output)
memory = formatted_list #These steps allow the model to have past dialogues loaded as a python list
else:
memory.append({"role": "system", "content": f"{base_prompt}"}, ) #creating the file with the system prompt
memory.append({"role": "user", "content": "Hello."}, )
save_ncb() #So the model's first words are a greeting to the user.
counter = 1 #now the model goes first.
#################
### Functions ###
#Function for the api request so that I don't have to copy paste this over and over again.
def api_request(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt
)
api_request.response = response['choices'][0]['message']['content'].strip()
memory.append({"role": "assistant", "content": f"{api_request.response}"}, ) #write to the memory variable
save_ncb() #save memory to ncb after generation of response
log = open("logv2.log", "a")
log.write("\n" + api_request.response) #Write to log
log.close()
#################################################################################
# This is the discord bot portion.
intents = discord.Intents().all()
client = commands.Bot(command_prefix=',', intents=intents)
@client.event
async def on_ready():
print('online')
print(memory)
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.author.bot: return
for i in range(len(triggers)):
if triggers[i].lower() in message.content.lower():
memory.append({"role": "user", "content": message.content}, )
save_ncb()
api_request(memory)
print(api_request.response)
await message.channel.send(api_request.response)
client.run(discord_bot_token) | [
"Hello.",
"PLACEHOLDER",
"The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly."
] |
2024-01-10 | pbkowalski/wybory-NLP | keyword_extraction.py | import json
from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
from sqlitedict import SqliteDict
db = SqliteDict('./db.sqlite', autocommit=True)
#from llama_cpp.llama import Llama, LlamaGrammar
import httpx
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
text_splitter = RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show.
chunk_size = 800,
chunk_overlap = 60,
length_function = len,
add_start_index = True,
)
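#Rough illustration (assumed numbers): a 2,000-character speech split with chunk_size=800 and
#chunk_overlap=60 yields about three Document chunks, each starting roughly 740 characters after the
#previous one, and add_start_index records each chunk's starting offset in its metadata.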
def extract_keywords(tekst, llm_chain):
texts = text_splitter.create_documents([tekst])
responses = []
for doc in texts:
response = llm_chain.run(doc.page_content)
responses.append(response)
return responses
template = """<s>[INST]<<SYS>> Wymień słowa kluczowe z dokumentu, oddzielone przecinkami: <</SYS>>
Dokument: Szanowna Pani Marszałek! Abraham Lincoln miał takie powiedzenie: Czasem możesz kogoś oszukać, ale nie możesz okłamywać wszystkich cały czas. Premier Morawiecki konsekwentnie stara się tę mądrość obalić. Nic dziwnego, że pana wystąpienie zostało nazwane exposé kłamstw. I pozostał symbol tego wystąpienia - prezes Marian Banaś tam siedzący, oklaskujący na stojąco premiera. Każdego dnia pojawiają się nowe informacje, jak jego współpracownicy okradali, oszukując na VAT, polskich emerytów, pacjentów i niepełnosprawnych. Premier Morawiecki może mówić, że stworzył dobrobyt i Polacy na Wyspach pakują już walizki, żeby tu wrócić. A jaka jest, Wysoka Izbo, sytuacja? W 2018 r. zmarło 414 tys. osób, najwięcej od II wojny światowej, zapaść służby zdrowia, armagedon na SOR i rekordowy poziom skrajnego ubóstwa. Żyje w nim 5,4% Polaków. Wstyd, panie premierze, za te kłamstwa.[/INST]
Słowa kluczowe: służba zdrowia, ubóstwo, kłamstwa, exposé, afery </s>
<s>[INST]Dokument: Jacka Rostowskiego ze słynnym: na te obietnice, które składa Prawo i Sprawiedliwość, pieniędzy nie ma i w ciągu 4 najbliższych lat nie będzie, czy też samego Donalda Tuska: jeżeli ktoś wie, gdzie leżą zakopane w Polsce miliardy, które można porozdawać ludziom, to nie powinien z tym zwlekać.Z tego miejsca odpowiem panu premierowi Tuskowi. Tymi osobami, które wiedziały, gdzie nie są zakopane, ale ukradzione przez mafie VAT-owskie pieniądze, byli pan prezes Jarosław Kaczyński oraz pan premier Mateusz Morawiecki.Również minister Banaś, tak jest.Ale oczywiście o sukcesach polskiej gospodarki świadczy nie tylko wzrost przychodów budżetowych. Wszak do woli możemy żonglować wskaźnikami finansowymi i gospodarczymi. Bezrobocie z poziomu 8% w 2015 r. zjechało do 3,3% według najnowszych danych[/INST]
Słowa kluczowe: gospodarka, finanse, bezrobocie, mafia VAT-owska </s>
<s>[INST]Dokument: {question} [/INST]
Słowa kluczowe:
"""
#grammar_text = httpx.get("https://raw.githubusercontent.com/ggerganov/llama.cpp/master/grammars/list.gbnf").text
prompt = PromptTemplate(template=template, input_variables=["question"])
#grammar = LlamaGrammar.from_string(grammar_text)
#LlamaCpp.update_forward_refs()
llm = LlamaCpp(
model_path="../models/trurl-2-13b-instruct-q4_K_M.gguf",
verbose=True,
temperature=0.5,
n_ctx=4096,
n_gpu_layers=30,
mlock = True,
stop = ['</s>'],
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
#for i in range(110, 111):
#get list of files in directory
files = os.listdir(r"C:\Users\pawel\Development\Sejm_Scraper")
#filter files which begin with posiedzenie and are json files
files = [file for file in files if file.startswith("posiedzenie") and file.endswith(".json")]
for i in range(1,len(files)+1):
with open(f"C:\\Users\\pawel\\Development\\Sejm_Scraper\\posiedzenie_{i}.json", 'r', encoding = 'utf8') as fin:
posiedzenie = json.load(fin)
db_posiedzenie = SqliteDict('./db.sqlite',tablename=f'posiedzenie{i}', autocommit=True)
for j in range(len(posiedzenie)):
entry = db_posiedzenie.get(str(j), None)
if not entry:
print(f"Posiedzenie {i}, przemowienie {j}")
kwords = extract_keywords(posiedzenie[j]['tekst'], llm_chain)
print(f"Response: {kwords}")
to_posiedzenie = posiedzenie[j]
to_posiedzenie['keywords'] = kwords
db_posiedzenie[str(j)] = to_posiedzenie
db_posiedzenie.close()
# responses = {}
# for i in range(len(posiedzenie1)):
# wypowiedz = posiedzenie1[i]['tekst']
# texts = text_splitter.create_documents([wypowiedz])
# responses[i] = []
# for doc in texts:
# response = llm_chain.run(doc.page_content)
# print("DOKUMENT:")
# print(doc)
# print("SŁOWA KLUCZOWE:")
# print(response)
# responses[i].append(response)
# db[str(i)] = str(responses[i])
db.close()
| [
"question",
"<s>[INST]<<SYS>> Wymień słowa kluczowe z dokumentu, oddzielone przecinkami: <</SYS>>\nDokument: Szanowna Pani Marszałek! Abraham Lincoln miał takie powiedzenie: Czasem możesz kogoś oszukać, ale nie możesz okłamywać wszystkich cały czas. Premier Morawiecki konsekwentnie stara się tę mądrość obalić. Nic dziwnego, że pana wystąpienie zostało nazwane exposé kłamstw. I pozostał symbol tego wystąpienia - prezes Marian Banaś tam siedzący, oklaskujący na stojąco premiera. Każdego dnia pojawiają się nowe informacje, jak jego współpracownicy okradali, oszukując na VAT, polskich emerytów, pacjentów i niepełnosprawnych. Premier Morawiecki może mówić, że stworzył dobrobyt i Polacy na Wyspach pakują już walizki, żeby tu wrócić. A jaka jest, Wysoka Izbo, sytuacja? W 2018 r. zmarło 414 tys. osób, najwięcej od II wojny światowej, zapaść służby zdrowia, armagedon na SOR i rekordowy poziom skrajnego ubóstwa. Żyje w nim 5,4% Polaków. Wstyd, panie premierze, za te kłamstwa.[/INST]\nSłowa kluczowe: służba zdrowia, ubóstwo, kłamstwa, exposé, afery </s>\n<s>[INST]Dokument: Jacka Rostowskiego ze słynnym: na te obietnice, które składa Prawo i Sprawiedliwość, pieniędzy nie ma i w ciągu 4 najbliższych lat nie będzie, czy też samego Donalda Tuska: jeżeli ktoś wie, gdzie leżą zakopane w Polsce miliardy, które można porozdawać ludziom, to nie powinien z tym zwlekać.Z tego miejsca odpowiem panu premierowi Tuskowi. Tymi osobami, które wiedziały, gdzie nie są zakopane, ale ukradzione przez mafie VAT-owskie pieniądze, byli pan prezes Jarosław Kaczyński oraz pan premier Mateusz Morawiecki.Również minister Banaś, tak jest.Ale oczywiście o sukcesach polskiej gospodarki świadczy nie tylko wzrost przychodów budżetowych. Wszak do woli możemy żonglować wskaźnikami finansowymi i gospodarczymi. Bezrobocie z poziomu 8% w 2015 r. zjechało do 3,3% według najnowszych danych[/INST]\nSłowa kluczowe: gospodarka, finanse, bezrobocie, mafia VAT-owska </s>\n<s>[INST]Dokument: {question} [/INST]\nSłowa kluczowe:\n"
] |
2024-01-10 | pbkowalski/wybory-NLP | keyword_extraction_google_cloud.py | import json
from langchain.llms import LlamaCpp
from langchain.llms import HuggingFaceEndpoint
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from google.cloud import storage
from google.cloud.sql.connector import Connector
from google.auth import compute_engine
import pymysql.cursors
from dotenv import load_dotenv
import re
load_dotenv()
credentials = compute_engine.Credentials()
#Initialize GC Storage
storage_client = storage.Client()
bucket_name = os.getenv("Google_cloud_bucket_name")
bucket = storage_client.bucket(bucket_name)
endpoint_url = os.getenv("Huggingface_endpoint_url")
use_hf = os.getenv("use_hf_endpoint")
#HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
# Create a Google Cloud SQL connection using a service account
connector = Connector()
conn = connector.connect(instance_connection_string=os.getenv("Google_cloud_connection_name"),
db=os.getenv("database_name"),
user=os.getenv("database_user"),
password=os.getenv("database_password"),
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
driver = 'pymysql',
autocommit=True)
cursor = conn.cursor()
#Langchain setup
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 800,
chunk_overlap = 60,
length_function = len,
add_start_index = True,
)
#initialize llm_chain only when it is required
def start_llm_chain():
if use_hf == "True":
llm = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
verbose=True,
task='text-generation',
model_kwargs = {
'temperature' : 0.5,
'stop' : ['</s>'],
'max_length': 250,
'max_new_tokens': 100
}
)
else:
llm = LlamaCpp(
model_path="../models/trurl-2-13b-instruct-q4_K_M.gguf",
verbose=True,
temperature=0.5,
n_ctx=4096,
n_gpu_layers=30,
mlock = True,
stop = ['</s>'],
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
return llm_chain
def extract_keywords(tekst, llm_chain):
texts = text_splitter.create_documents([tekst])
responses = []
if llm_chain is None:
llm_chain = start_llm_chain()
for doc in texts:
while(True):
try:
response = llm_chain.run(doc.page_content)
break
except Exception as e:
print(e)
responses.append(response)
return responses
template = """<s>[INST]<<SYS>> Wymień słowa kluczowe z dokumentu, oddzielone przecinkami: <</SYS>>
Dokument: Szanowna Pani Marszałek! Abraham Lincoln miał takie powiedzenie: Czasem możesz kogoś oszukać, ale nie możesz okłamywać wszystkich cały czas. Premier Morawiecki konsekwentnie stara się tę mądrość obalić. Nic dziwnego, że pana wystąpienie zostało nazwane exposé kłamstw. I pozostał symbol tego wystąpienia - prezes Marian Banaś tam siedzący, oklaskujący na stojąco premiera. Każdego dnia pojawiają się nowe informacje, jak jego współpracownicy okradali, oszukując na VAT, polskich emerytów, pacjentów i niepełnosprawnych. Premier Morawiecki może mówić, że stworzył dobrobyt i Polacy na Wyspach pakują już walizki, żeby tu wrócić. A jaka jest, Wysoka Izbo, sytuacja? W 2018 r. zmarło 414 tys. osób, najwięcej od II wojny światowej, zapaść służby zdrowia, armagedon na SOR i rekordowy poziom skrajnego ubóstwa. Żyje w nim 5,4% Polaków. Wstyd, panie premierze, za te kłamstwa.[/INST]
Słowa kluczowe: służba zdrowia, ubóstwo, kłamstwa, exposé, afery </s>
<s>[INST]Dokument: Jacka Rostowskiego ze słynnym: na te obietnice, które składa Prawo i Sprawiedliwość, pieniędzy nie ma i w ciągu 4 najbliższych lat nie będzie, czy też samego Donalda Tuska: jeżeli ktoś wie, gdzie leżą zakopane w Polsce miliardy, które można porozdawać ludziom, to nie powinien z tym zwlekać.Z tego miejsca odpowiem panu premierowi Tuskowi. Tymi osobami, które wiedziały, gdzie nie są zakopane, ale ukradzione przez mafie VAT-owskie pieniądze, byli pan prezes Jarosław Kaczyński oraz pan premier Mateusz Morawiecki.Również minister Banaś, tak jest.Ale oczywiście o sukcesach polskiej gospodarki świadczy nie tylko wzrost przychodów budżetowych. Wszak do woli możemy żonglować wskaźnikami finansowymi i gospodarczymi. Bezrobocie z poziomu 8% w 2015 r. zjechało do 3,3% według najnowszych danych[/INST]
Słowa kluczowe: gospodarka, finanse, bezrobocie, mafia VAT-owska </s>
<s>[INST]Dokument: {question} [/INST]
Słowa kluczowe:"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = start_llm_chain()
#Get list of relevant files in GC Storage
blobs = [blob for blob in bucket.list_blobs() if "posiedzenie" in blob.name and blob.name.endswith(".json")]
rows = []
cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'posiedzenia' AND table_name NOT LIKE 'entries';")
tables = cursor.fetchall()
print("Querying database...")
for table in tables:
cursor.execute(f"SELECT posiedzenie, nr_wypowiedzi, dzien FROM {table['TABLE_NAME']}")
rows.extend(cursor.fetchall())
print(f"Querying yielded {len(rows)} speeches")
for blob in blobs:
#load json from Google Cloud Storage
posiedzenie = json.loads(blob.download_as_string())
nr_posiedzenia = int(blob.name.split('/')[-1].split("_")[1].split(".")[0])
#get table name from blob name
table_name = f"posiedzenie{nr_posiedzenia}"
#create table if it does not exist
headers = list(posiedzenie[0].keys())
if 'keywords' not in headers:
headers.append('keywords')
types = ["CHAR(10)","TEXT", "TEXT", "TEXT", "TEXT", "TINYINT", "TINYINT", "SMALLINT", "TEXT"]
typesdict = {list(headers)[i]:types[i] for i in range(len(headers))}
create_table_sql = f"CREATE TABLE IF NOT EXISTS {table_name} ("
for col in headers:
col_name = col
col_type = typesdict[col]
create_table_sql += f"{col_name} {col_type}, "
create_table_sql = create_table_sql.rstrip(', ') + ");"
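#For reference, the loop above assembles a statement of this general shape, pairing one column name with
#one type from the list in order (the table number and placeholder column names here are only illustrative):
#  CREATE TABLE IF NOT EXISTS posiedzenie1 (col1 CHAR(10), col2 TEXT, ..., col8 SMALLINT, keywords TEXT);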
cursor.execute(create_table_sql)
#get all speeches already in database
#iterate over speeches in session
for przemowienie in posiedzenie:
nr_wypowiedzi = przemowienie['nr_wypowiedzi']
#check if already in database
if not [d for d in rows if d['posiedzenie'] == przemowienie['posiedzenie'] and d['nr_wypowiedzi'] == przemowienie['nr_wypowiedzi'] and d['dzien'] == przemowienie['dzien']]:
dict_repr = przemowienie.copy()
print(f"Posiedzenie {nr_posiedzenia}, dzien {przemowienie['dzien']}, przemowienie {nr_wypowiedzi}")
kwords = extract_keywords(przemowienie['tekst'], llm_chain)
print(f"Response: {kwords}")
keywords = ','.join(kwords)
kw_as_list = keywords.split(',')
kw_cleaned = [re.sub(r'[^\w\s]','',x.replace('\\n','')).strip() for x in kw_as_list if re.search('\w{4,}',x)]
dict_repr['keywords'] = ','.join(kw_cleaned)
columns = ', '.join(dict_repr.keys())
values = tuple(dict_repr.values())
insert_query = f"INSERT INTO {table_name} ({columns}) VALUES"
insert_query = insert_query + " (" + "%s,"*(len(values)-1) + "%s)"
print(f"Cleaned keywords: {','.join(kw_cleaned)}")
#print(insert_query)
cursor.execute(insert_query, values )
conn.close()
| [
"question",
"<s>[INST]<<SYS>> Wymień słowa kluczowe z dokumentu, oddzielone przecinkami: <</SYS>>\nDokument: Szanowna Pani Marszałek! Abraham Lincoln miał takie powiedzenie: Czasem możesz kogoś oszukać, ale nie możesz okłamywać wszystkich cały czas. Premier Morawiecki konsekwentnie stara się tę mądrość obalić. Nic dziwnego, że pana wystąpienie zostało nazwane exposé kłamstw. I pozostał symbol tego wystąpienia - prezes Marian Banaś tam siedzący, oklaskujący na stojąco premiera. Każdego dnia pojawiają się nowe informacje, jak jego współpracownicy okradali, oszukując na VAT, polskich emerytów, pacjentów i niepełnosprawnych. Premier Morawiecki może mówić, że stworzył dobrobyt i Polacy na Wyspach pakują już walizki, żeby tu wrócić. A jaka jest, Wysoka Izbo, sytuacja? W 2018 r. zmarło 414 tys. osób, najwięcej od II wojny światowej, zapaść służby zdrowia, armagedon na SOR i rekordowy poziom skrajnego ubóstwa. Żyje w nim 5,4% Polaków. Wstyd, panie premierze, za te kłamstwa.[/INST]\nSłowa kluczowe: służba zdrowia, ubóstwo, kłamstwa, exposé, afery </s>\n<s>[INST]Dokument: Jacka Rostowskiego ze słynnym: na te obietnice, które składa Prawo i Sprawiedliwość, pieniędzy nie ma i w ciągu 4 najbliższych lat nie będzie, czy też samego Donalda Tuska: jeżeli ktoś wie, gdzie leżą zakopane w Polsce miliardy, które można porozdawać ludziom, to nie powinien z tym zwlekać.Z tego miejsca odpowiem panu premierowi Tuskowi. Tymi osobami, które wiedziały, gdzie nie są zakopane, ale ukradzione przez mafie VAT-owskie pieniądze, byli pan prezes Jarosław Kaczyński oraz pan premier Mateusz Morawiecki.Również minister Banaś, tak jest.Ale oczywiście o sukcesach polskiej gospodarki świadczy nie tylko wzrost przychodów budżetowych. Wszak do woli możemy żonglować wskaźnikami finansowymi i gospodarczymi. Bezrobocie z poziomu 8% w 2015 r. zjechało do 3,3% według najnowszych danych[/INST]\nSłowa kluczowe: gospodarka, finanse, bezrobocie, mafia VAT-owska </s>\n<s>[INST]Dokument: {question} [/INST]\nSłowa kluczowe:"
] |
2024-01-10 | cyph3rryx/ChatBot | Optimized_V1.py | import openai
def chatbot(prompt, api_key):
try:
completions = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
api_key=api_key
)
message = completions.choices[0].text
return message
except Exception as e:
print("Error in chatbot:", e)
return None
while True:
user_input = input("\nUser: ")
if user_input == "exit":
break
response = chatbot(user_input, "YOUR-API")
if response is not None:
print("\nChatbot:", response)
else:
print("Error in chatbot")
| [] |
2024-01-10 | cyph3rryx/ChatBot | source.py | import openai
openai.api_key = "YOUR-API"
def chatbot(prompt):
completions = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
message = completions.choices[0].text
return message
while True:
user_input = input("\nUser: ")
if user_input == "exit":
break
response = chatbot(user_input)
print("\nChatbot:", response)
| [] |
2024-01-10 | jpjacobpadilla/Mephisto-ParlAI | parlai~parlai~agents~_custom~gpt3_render.py | # -*- coding: utf-8 -*-
# =============================================================================
# Author: Yuxuan Wang
# Date: 12-06-2022
# =============================================================================
"""
This module contains a wrapper for GPT-3-based style transfer. On initialization,
it will instantiate a base generator agent and a renderer function. The renderer
function will use GPT-3 API call to perform style transfer.
"""
from typing import Callable, Dict
import os
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
import re
import json
import logging
from parlai.core.agents import Agent, create_agent, create_agent_from_shared
class Gpt3RenderAgent(Agent):
"""A wrapper for GPT3-based style transfer. On initialization, it will instantiate
a base generator agent and a renderer function. The renderer function will
use GPT3 API call to perform style transfer."""
gpt3_config_key = "gpt3"
generator_config_key = "generator"
generator_shared_key = "generator"
def __init__(self, opt, shared=None):
super().__init__(opt)
logging.info("CONFIG:\n" + json.dumps(opt, indent=True))
self.id = __class__.__name__
self.renderer_config = opt[__class__.gpt3_config_key]
self.generator_config = opt[__class__.generator_config_key]
self.renderer = self.create_renderer(self.renderer_config)
# If we want to save the history of rendered responses
# or the base response from the generator. The idea
# is to set whether the generator is aware of the style transfer
self.is_render_aware = opt.get("is_render_aware", False)
# Create generator from config file (first-time instantiation)
if shared is None:
logging.info("CREATED FROM PROTOTYPE")
self.generator = create_agent(self.generator_config)
# Create generator from shared state
else:
logging.info("CREATED FROM COPY")
self.generator = create_agent_from_shared(shared[__class__.generator_shared_key])
def share(self) -> Dict:
"""Copy response function <self.resp_fn>"""
logging.info("MODEL COPIED")
shared = super().share()
shared[__class__.generator_shared_key] = self.generator.share()
return shared
def render_aware_act(self, observation) -> Dict:
"""Render the basic generated response and make the generator
aware of the style-transferred response by calling `generator.self_observe`
to update the history of the generator."""
message = self.generator.batch_act([observation])[0]
response = self.renderer(message)
message.force_set("text", response["text"])
self.generator.self_observe(message)
logging.info(f"HISTORY: {self.generator.history.get_history_str()}")
return response
def act(self) -> Dict:
"""Forward the observation to the wrapped generator, then call the
GPT-3 renderer to perform style transfer."""
observation = self.observation
if observation is None:
return {"id": self.getID(),
"gpt3_prompt": "",
"text": "",
"base_response": ""}
# Call the wrapped generator
self.generator.observe(observation)
if (self.is_render_aware
and hasattr(self.generator, "batch_act")
and hasattr(self.generator, "history")
and hasattr(self.generator, "self_observe")):
logging.info("RENDER AWARE ACT")
return self.render_aware_act(self.generator.observation)
logging.info("NORMAL ACT")
return self.renderer(self.generator.act())
def reset(self):
"""Reset the agent, clearing its observation."""
self.observation = None
self.generator.reset()
def create_renderer(self, opt: Dict) -> Callable[[Dict], Dict]:
"""Create a GPT-3 renderer function based on the options passed in."""
import openai
def gpt_completion(s: str) -> str:
"""GPT-3 completion function. Takes a raw bot response and returns a new response.
TODO: Should allow prompt to be passed in as well, or read from a config file."""
prompt_table = {
"empathetic": (
"Here is an empathetic sentence: {I thought it was terribly depressing what these children had to go through.}"
"\nHere is another empathetic sentence: {I feel so sad for everyone especially the old and sickly seems as they are in the worst position.}"
"\nHere is another empathetic sentence: {You always want better for your kid and I think giving them up to someone else would be the best option even though it hurts. So sad. My heart just breaks for these woman.}"
"\nHere is some text: {" + s + ".} Here is a rewrite of the text, which is more empathetic: {"
),
"self-disclosure": (
"Here is a sentence of high self-disclosure: {I was always scared as a catholic to go to church as a kid and would always talk my mom out of going which probably turned out to save me in the long run.}"
"\nHere is another sentence of high self-disclosure: {I love that, personally my father went outside to smoke and never smoked with us in the car but my friends parents did and I would feel so sick after being in there car no child should suffer through it.}"
"\nHere is another sentence of high self-disclosure: {My father was in the Navy and I have two sisters in the Army.}"
"\nHere is some text: {" + s + ".} Here is a rewrite of the text, which is of higher self-disclosure: {"
)
}
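# Note on how these prompts are consumed: each entry is a few-shot rewrite prompt that wraps sentences in
# curly braces; the raw bot response s is spliced in as the final "Here is some text: {...}" and the model
# continues after the last opening brace, which is why the rewrite is read straight from
# completion["choices"][0]["text"] below.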
prompt = prompt_table.get(opt["style"])
if prompt is None:
raise ValueError(f"Style {opt['style']} not supported. Choose from {list(prompt_table.keys())}")
# For debugging, return the prompt without calling GPT-3
if self.renderer_config.get("is_dry_run", False):
return f"[ echo render ] :: {s}", prompt
openai.api_key = OPENAI_API_KEY
completion = openai.Completion.create(prompt=prompt, **opt["generation_config"])
return completion["choices"][0]["text"].strip(), prompt
def renderer(message: Dict) -> Dict:
"""Take a raw bot response and return a rendered response using
the GPT-3 style-transfer defined above."""
def clean_token(token: str) -> str:
token = re.sub(r"_POTENTIALLY_UNSAFE__", "", token, flags=re.IGNORECASE)
return token.strip()
base_text = clean_token(message["text"])
text, prompt = gpt_completion(base_text)
response = {"id": self.getID(),
"gpt3_prompt": prompt,
"base_response": base_text,
"text": text}
return response
return renderer
| [
"{'empathetic': 'Here is an empathetic sentence: {I thought it was terribly depressing what these children had to go through.}\\nHere is another empathetic sentence: {I feel so sad for everyone especially the old and sickly seems as they are in the worst position.}\\nHere is another empathetic sentence: {You always want better for your kid and I think giving them up to someone else would be the best option even though it hurts. So sad. My heart just breaks for these woman.}\\nHere is some text: {PLACEHOLDER.} Here is a rewrite of the text, which is more empathetic: {', 'self-disclosure': 'Here is a sentence of high self-disclosure: {I was always scared as a catholic to go to church as a kid and would always talk my mom out of going which probably turned out to save me in the long run.}\\nHere is another sentence of high self-disclosure: {I love that, personally my father went outside to smoke and never smoked with us in the car but my friends parents did and I would feel so sick after being in there car no child should suffer through it.}\\nHere is another sentence of high self-disclosure: {My father was in the Navy and I have two sisters in the Army.}\\nHere is some text: {PLACEHOLDER.} Here is a rewrite of the text, which is of higher self-disclosure: {'}"
] |
2024-01-10 | Coordi777/Conditional-Diffusion-for-SAR-to-Optical-Image-Translation | guided_diffusion~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import datetime
import json
import os
import os.path as osp
import sys
import tempfile
import time
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
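# Minimal usage sketch (illustrative values):
#   configure(dir="/tmp/my_run", format_strs=["stdout", "csv"])
#   for step in range(3):
#       logkv("step", step)
#       logkv_mean("loss", 0.1 * step)
#       dumpkvs()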
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
| [] |
2024-01-10 | ZoneSixGames/RoboTalk | robark.py | # Bring in deps
from dotenv import load_dotenv
import os
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.memory import ConversationBufferMemory
import urllib.parse
from bark import generate_audio, SAMPLE_RATE, preload_models
import numpy as np
import nltk
from pydub import AudioSegment
import requests
from bs4 import BeautifulSoup
import feedparser
from datetime import datetime
# Preload Bark models
preload_models()
# Application Framework
st.title('Zone Six Podcast Creator')
# Collect the inputs
prompt = st.text_input("Enter the podcast topic")
p1_name = st.text_input("Host Name", value='Libby')
p1 = st.text_input("Enter the personality for the Host", value='liberal Democrat woman, background in international affairs and world health, mirroring viewpoints of nicole wallace, Stephen colbert, ali velshi, and christiane amanpour.')
p2_name = st.text_input("Enter second character's name:", value='Be Free')
p2 = st.text_input("Enter the personality for voice 2", value='weed smoking Libertarian bisexual woman from California who loves the environment, technology, and protecting individual freedoms from government overreach, mirroring the views of Spike Cohen, Elon Musk, Carl Sagan, and joe rogan.')
p3_name = st.text_input("Enter third character's name:", value='Q Anon 42')
p3 = st.text_input("Enter the personality for voice 3", value='right wing conservative Republican from Florida, follows government conspiracies on 4chan and reddit, mirroring the views of Milo Yiannopoulos, Ben Shapiro, Steve Bannon, and Tucker Carlson.')
p1_NAME = p1_name.upper()
p2_NAME = p2_name.upper()
p3_NAME = p3_name.upper()
# Map character names to voices (keys are uppercased so they match the roles produced by parse_script)
VOICE_MAP = {
p1_NAME: "v2/en_speaker_1", # host voice
p2_NAME: "v2/en_speaker_2", # guest1 voice
p3_NAME: "v2/en_speaker_3", # guest2 voice
}
PODCAST_DIR = None
# Load up the entries as environment variables
load_dotenv()
# Access the environment variables
API_KEYS = {
'OPENAI_API_KEY': st.text_input("Enter your OpenAI API key"),
'GOOGLE_CSE_ID': st.text_input("Enter your Custom Search Engine ID (CSE) key"),
'GOOGLE_API_KEY': st.text_input("Enter your Google API key"),
}
# Initialize environment
os.environ.update(API_KEYS)
# Initialize components
google_search_tool = GoogleSearchAPIWrapper()
# Initialize OpenAI API
openai_llm = OpenAI(model_name="gpt-3.5-turbo-16k") # Initialize the OpenAI LLM
# Define templates
title = PromptTemplate.from_template("Write a witty, funny, or ironic podcast title about {topic}.")
script = PromptTemplate.from_template("Write a podcast script based on a given title, research, and unique personalities. Title: {title}, Research: {news_research}, Personalities: Host: {p1_NAME}: {p1}, First Guest: {p2_NAME}: {p2}, Second Guest: {p3_NAME}: {p3}. The podcast should start with the Host giving an introduction and continue with the guest speakers as follows: {p1_NAME}: content n/ {p2_NAME}: Content n/ {p3_NAME}: content n/ and so on, replacing the host and guest names with the input names")
cont_script = PromptTemplate.from_template("Continue writing a podcast script based on a given title, research, recent podcast discussion history. Title: {title}, Research: {research}, Script: {script}")
news = PromptTemplate.from_template("Summarize this news story: {story}")
# Initialize chains
chains = {
'title': LLMChain(llm=openai_llm, prompt=title, verbose=True, output_key='title'),
'script': LLMChain(llm=openai_llm, prompt=script, verbose=True, output_key='script'),
'cont_script': LLMChain(llm=openai_llm, prompt=cont_script, verbose=True, output_key='cont_script'),
'news': LLMChain(llm=openai_llm, prompt=news, verbose=True, output_key='summary'),
}
# Initialize session state for script, research, title if they doesn't exist
if 'script' not in st.session_state:
st.session_state.script = "Script will appear here"
if 'title' not in st.session_state:
st.session_state.title = "Podcast Title Will Appear Here"
if 'cont_script' not in st.session_state:
st.session_state.cont_script = ""
if 'news' not in st.session_state:
st.session_state.news = ""
if 'research' not in st.session_state:
st.session_state.research = ""
if 'podcast_dir' not in st.session_state:
st.session_state.podcast_dir = ""
#Define the functions
def extract_news_text(url):
"""Extract the text of a news story given its URL."""
nltk.download('punkt')
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
paragraphs = soup.find_all('p')
# Concatenate all paragraphs into a single string
story_text = ' '.join([p.get_text() for p in paragraphs])
# Tokenize the story_text
tokens = nltk.word_tokenize(story_text)
# Only keep the first 4000 tokens
tokens = tokens[:4000]
# Rejoin the tokens into a single string
story_text = ' '.join(tokens)
return story_text
def get_top_news_stories(topic, num_stories=5):
"""Get the top num_stories news stories on the given topic."""
# URL encode the topic to ensure it's valid in a URL
topic = urllib.parse.quote_plus(topic)
# Get the feed from the Google News RSS
feed = feedparser.parse(f'https://news.google.com/rss/search?q={topic}')
# Return the top num_stories stories
return feed.entries[:num_stories]
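# Illustrative example (hypothetical topic): get_top_news_stories("climate policy") requests
# https://news.google.com/rss/search?q=climate+policy and returns the first five feed entries,
# each carrying the 'link' that summarize_news_stories() passes on to extract_news_text().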
def summarize_news_stories(stories):
"""Summarize each news story using the OpenAI model."""
summaries = []
total_tokens = 0
for story in stories:
# Extract the URL from the story metadata
url = story.get('link', '')
if url:
# Extract the news text
story_text = extract_news_text(url)
# Generate a summary
summary = chains['news'].run(story_text)
# Add summary to the list if it doesn't exceed the token limit
summary_tokens = len(summary.split()) # rough token count
if total_tokens + summary_tokens <= 10000:
summaries.append(summary)
total_tokens += summary_tokens
else:
break # stop if we reach the token limit
return summaries
def create_podcast_directory():
global PODCAST_DIR
now = datetime.now() # get current date and time
date_time = now.strftime("%Y_%m_%d_%H_%M_%S") # format as a string
podcast_dir = f"Podcast_{date_time}" # prepend "Podcast_" to the string
if not os.path.exists(podcast_dir):
os.makedirs(podcast_dir)
PODCAST_DIR = podcast_dir
return PODCAST_DIR
def convert_comments_to_audio(comments):
"""Generate audio for each comment in the script."""
audio_files = []
silence = np.zeros(int(0.5*SAMPLE_RATE))
for comment in comments:
voice_id = VOICE_MAP[comment['role']]
audio_array = generate_audio(comment['text'], history_prompt=voice_id) # Bark returns a float numpy array
audio_file = f"{st.session_state.podcast_dir}/{comment['role']}_{comment['order']}.mp3" # Save in podcast directory
# A numpy array has no .export; convert it to 16-bit PCM and let pydub write the mp3
segment = AudioSegment((np.clip(audio_array, -1, 1) * 32767).astype(np.int16).tobytes(), frame_rate=SAMPLE_RATE, sample_width=2, channels=1)
segment.export(audio_file, format="mp3")
audio_files.append(audio_file)
return audio_files
def parse_script(script):
comments = []
lines = script.split('\n')
for i, line in enumerate(lines):
if ':' in line:
role, content = line.split(':', 1)
if role and content:
role = role.strip().upper() # capitalize role
comments.append({'role': role, 'text': content.strip(), 'order': i})
return comments
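# Illustrative example (assumed host name): a script line such as "Libby: Welcome back to the show."
# becomes {'role': 'LIBBY', 'text': 'Welcome back to the show.', 'order': <line index>}; the uppercased
# role is the key used to pick a Bark voice from VOICE_MAP in convert_comments_to_audio().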
def validate_inputs(prompt, p1, p2, p3):
return all([prompt, p1, p2, p3])
def combine_audio_files(audio_files):
combined = AudioSegment.empty()
for audio_file in sorted(audio_files):
segment = AudioSegment.from_mp3(audio_file)
combined += segment
return combined
#Operational Structure
if st.button('Generate Script') and validate_inputs(prompt, p1, p2, p3):
# Research and summarize top news stories
stories = get_top_news_stories(prompt)
news_summaries = summarize_news_stories(stories)
st.session_state.research = ' '.join(news_summaries) # Join the list of summaries into a single string
# Generate title
title_result = chains['title'].run(topic=prompt)
st.session_state.title = title_result # Saving title directly to session state.
# Generate and display initial script
script_result = chains['script'].run(
title=st.session_state.title,
news_research=st.session_state.research,
p1_NAME=p1_NAME,
p2_NAME=p2_NAME,
p3_NAME=p3_NAME,
p1=p1,
p2=p2,
p3=p3
)
st.session_state.script = script_result
# Save the script in the session state and to a text file
st.session_state.podcast_dir = create_podcast_directory()
with open(f"{st.session_state.podcast_dir}/podcast_script.txt", 'w') as f:
f.write(st.session_state.script)
st.success(f"Script saved in {st.session_state.podcast_dir}/podcast_script.txt")
with open(f"{st.session_state.podcast_dir}/podcast_research.txt", 'w') as f:
f.write(st.session_state.research)
st.success(f"Research saved in {st.session_state.podcast_dir}/podcast_research.txt")
if st.button('Continue Script') and validate_inputs(prompt, p1, p2, p3):
# Generate and display initial script
script_result = chains['cont_script'].run(
title=st.session_state.title,
research=st.session_state.research,
script=st.session_state.script
)
st.session_state.script += str(script_result)
# Save the script in the session state and to a text file
with open(f"{st.session_state.podcast_dir}/podcast_script.txt", 'w') as f:
f.write(str(st.session_state.script))
st.success(f"Script saved in {st.session_state.podcast_dir}/podcast_script.txt")
# Download script
st.download_button("Download Script", data='\n'.join(st.session_state.script), file_name='podcast_script.txt', mime='text/plain')
# Display script from session state
st.write(f'Title: {st.session_state.title}')
st.write(f'Script: \n{st.session_state.script}')
st.write(f'\n{st.session_state.cont_script}')
print(st.session_state.podcast_dir)
if st.button('Create Voices') and st.session_state.script:
comments = parse_script(st.session_state.script)
st.session_state['audio_files'] = convert_comments_to_audio(comments)
for i, audio_file in enumerate(st.session_state['audio_files']):
st.audio(f"{st.session_state.podcast_dir}/podcast.mp3", format='audio/mp3')
if st.button('Combine Audio') and st.session_state.script:
combined_audio = combine_audio_files(st.session_state['audio_files'])
combined_audio.export(f"{st.session_state.podcast_dir}/complete_podcast.mp3", format='mp3')
st.audio(f"{st.session_state.podcast_dir}/complete_podcast.mp3", format='audio/mp3')
if st.button('Download Podcast') and os.path.exists(f"{st.session_state.podcast_dir}/complete_podcast.mp3"):
with open(f"{st.session_state.podcast_dir}/complete_podcast.mp3", 'rb') as f:
bytes = f.read()
st.download_button("Download Podcast", data=bytes, file_name=f"{st.session_state.podcast_dir}/complete_podcast.mp3", mime='audio/mpeg')
with st.expander('News Summaries'):
st.write(st.session_state.research)
with st.expander('Script'):
st.write(st.session_state.title)
st.write(st.session_state.script)
st.write(st.session_state.cont_script)
| [
"Write a witty, funny, or ironic podcast title about {topic}.",
"Write a podcast script based on a given title, research, and unique personalities. Title: {title}, Research: {news_research}, Personalities: Host: {p1_NAME}: {p1}, First Guest: {p2_NAME}: {p2}, Second Guest: {p3_NAME}: {p3}. The podcast should start with the Host giving an introduction and continue with the guest speakers as follows: {p1_NAME}: content n/ {p2_NAME}: Content n/ {p3_NAME}: content n/ and so on, replacing the host and guest names with the input names",
"Continue writing a podcast script based on a given title, research, recent podcast discussion history. Title: {title}, Research: {research}, Script: {script}",
"Summarize this news story: {story}",
"Enter the podcast topic"
] |
2024-01-10 | ZoneSixGames/RoboTalk | robotalk.py | # Bring in deps
from decouple import config
import os
import streamlit as st
import requests
from bs4 import BeautifulSoup
from langchain import LLMChain, OpenAI # Import the correct class
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from langchain.utilities import GoogleSearchAPIWrapper
from elevenlabs import generate, save, voices
import urllib.parse
import feedparser
from datetime import datetime
from pydub import AudioSegment
import nltk
nltk.download('punkt', quiet=True)  # nltk.word_tokenize in extract_news_text below needs the punkt models
# Access the environment variables
API_KEYS = {
'OPENAI_API_KEY': config('OPENAI_API_KEY'),
'ELEVENLABS_API_KEY': config('ELEVENLABS_API_KEY'),
'ELEVENLABS_VOICE_1_ID': config('ELEVENLABS_VOICE_1_ID'),
'ELEVENLABS_VOICE_2_ID': config('ELEVENLABS_VOICE_2_ID'),
'ELEVENLABS_VOICE_3_ID': config('ELEVENLABS_VOICE_3_ID'),
'ELEVENLABS_VOICE_4_ID': config('ELEVENLABS_VOICE_4_ID'),
'ELEVENLABS_VOICE_5_ID': config('ELEVENLABS_VOICE_5_ID'),
'ELEVENLABS_VOICE_6_ID': config('ELEVENLABS_VOICE_6_ID'),
'ELEVENLABS_VOICE_7_ID': config('ELEVENLABS_VOICE_7_ID'),
'ELEVENLABS_VOICE_8_ID': config('ELEVENLABS_VOICE_8_ID'),
'GOOGLE_CSE_ID': config('CUSTOM_SEARCH_ENGINE_ID'),
'GOOGLE_API_KEY': config('GOOGLE_API_KEY'),
}
# Application Framework
st.title('RoboTalk Podcast Creator by Zone Six')
# Collect the inputs
prompt = st.text_input("Enter the podcast topic")
p1_name = st.text_input("Host Name")
p1 = st.text_input("Enter the personality for the Host")
# Initialize environment
os.environ.update(API_KEYS)
# Initialize components
google_search_tool = GoogleSearchAPIWrapper()
# Initialize OpenAI API
openai_llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k") # Initialize the OpenAI LLM
# Define templates
title = PromptTemplate.from_template("Write a witty, funny, or ironic podcast title about {topic}.")
script = PromptTemplate.from_template("Write a first person editorial podcast based on a given title, research, and unique author personality. Title: {title}, Research: {news_research}, Personality: {p1_name}: {p1}. The article should start by giving an introduction to the topic and then offering an opinion based on the personality of the author. Do not use formal words like 'in conclusion' or 'however' or 'furthermore'.")
# cont_script = PromptTemplate.from_template("Continue writing a podcast script based on a given title, research, recent podcast discussion history. Title: {title}, Research: {research}, Script: {script}")
news = PromptTemplate.from_template("Summarize this news story: {story}")
research = PromptTemplate.from_template("Summarize the research into talking points: {research}")
# Initialize chains
chains = {
'title': LLMChain(llm=openai_llm, prompt=title, verbose=True, output_key='title'),
'script': LLMChain(llm=openai_llm, prompt=script, verbose=True, output_key='script'),
# 'cont_script': LLMChain(llm=openai_llm, prompt=cont_script, verbose=True, output_key='cont_script'),
'news': LLMChain(llm=openai_llm, prompt=news, verbose=True, output_key='summary'),
'research': LLMChain(llm=openai_llm, prompt=research, verbose=True, output_key='research'),
}
# Initialize session state for script, research, title if they don't exist
if 'script' not in st.session_state:
st.session_state.script = "Script will appear here"
if 'title' not in st.session_state:
st.session_state.title = "Podcast Title Will Appear Here"
if 'news' not in st.session_state:
st.session_state.news = ""
if 'research' not in st.session_state:
st.session_state.research = ""
if 'podcast_dir' not in st.session_state:
st.session_state.podcast_dir = ""
def extract_news_text(url):
"""Extract the text of a news story given its URL."""
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
paragraphs = soup.find_all('p')
# Concatenate all paragraphs into a single string
story_text = ' '.join([p.get_text() for p in paragraphs])
# Tokenize the story_text
tokens = nltk.word_tokenize(story_text)
# Only keep the first XXXX tokens
tokens = tokens[:2800]
# Rejoin the tokens into a single string
story_text = ' '.join(tokens)
return story_text
def get_top_news_stories(topic, num_stories=5):
"""Get the top num_stories news stories on the given topic."""
# URL encode the topic to ensure it's valid in a URL
topic = urllib.parse.quote_plus(topic)
# Get the feed from the Google News RSS
feed = feedparser.parse(f'https://news.google.com/rss/search?q={topic}')
# Return the top num_stories stories
return feed.entries[:num_stories]
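# Illustrative usage sketch (not part of the app flow): feedparser entries expose
# 'title' and 'link' fields, which summarize_news_stories below relies on. Assumes
# the Google News RSS feed is reachable at runtime.
#
#   stories = get_top_news_stories("artificial intelligence", num_stories=3)
#   for story in stories:
#       print(story.title, story.link)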
def summarize_news_stories(stories):
"""Summarize each news story using the OpenAI model."""
summaries = []
total_tokens = 0
for story in stories:
# Extract the URL from the story metadata
url = story.get('link', '')
if url:
# Extract the news text
story_text = extract_news_text(url)
# Generate a summary
summary = chains['news'].run(story_text)
# Add summary to the list if it doesn't exceed the token limit
summary_tokens = len(summary.split()) # rough token count
if total_tokens + summary_tokens <= 10000:
summaries.append(summary)
total_tokens += summary_tokens
else:
break # stop if we reach the token limit
return summaries
def validate_inputs(prompt, p1, p1_name):
return all([prompt, p1, p1_name])
def create_podcast_directory():
now = datetime.now() # get current date and time
date_time = now.strftime("%Y_%m_%d_%H_%M_%S") # format as a string
podcast_dir = f"Podcast_{date_time}" # prepend "Podcast_" to the string
if not os.path.exists(podcast_dir):
os.makedirs(podcast_dir)
return podcast_dir
def convert_script_to_audio(script_text, podcast_dir):
selected_voice_id = API_KEYS.get(voice_options[selected_voice])
print(selected_voice_id) # Add this line to check the selected voice ID
if selected_voice_id is None:
st.error("Selected voice not found.")
return []
audio = generate(text=script_text, api_key=API_KEYS['ELEVENLABS_API_KEY'], voice=selected_voice_id)
audio_file = f"{podcast_dir}/podcast.mp3" # Save in podcast directory
save(audio=audio, filename=audio_file)
print(audio_file) # Add this line to check the audio file path
return [audio_file] # Return a list with one audio file
# Operational Structure
if st.button('Research') and validate_inputs(prompt, p1, p1_name):
# Research and summarize top news stories
stories = get_top_news_stories(prompt)
news_summaries = summarize_news_stories(stories)
research_summary = chains['research'].run(research=' '.join(news_summaries)) # Use the research chain
st.session_state.research = research_summary # Store the research summary in the session state
st.session_state.podcast_dir = create_podcast_directory()
with open(f"{st.session_state.podcast_dir}/podcast_research.txt", 'w') as f:
f.write(st.session_state.research)
st.success(f"Research saved in {st.session_state.podcast_dir}/podcast_research.txt")
if st.button('Generate Script') and validate_inputs(prompt, p1, p1_name):
# Generate title
title_result = chains['title'].run(topic=prompt)
st.session_state.title = title_result
# Generate and display initial script
script_result = chains['script'].run(
title=st.session_state.title,
news_research=st.session_state.research, # Use the research summary
p1_name=p1_name,
p1=p1,
)
st.session_state.script = script_result
# Display and edit the script
edited_script = st.text_area('Edit the Script', st.session_state.script, key='edit_script', height=300)
# Check if the script has been modified
if edited_script != st.session_state.script:
st.session_state.script = edited_script
# Save the edited script to the session state and to a text file
if st.button('Save Script') and 'edit_script' in st.session_state:
edited_script = st.session_state.edit_script
# Update the session state with the edited script
st.session_state.script = edited_script
# Save the edited script to the text file
with open(f"{st.session_state.podcast_dir}/podcast_script.txt", 'w') as f:
f.write(edited_script)
st.success(f"Edited script saved in {st.session_state.podcast_dir}/podcast_script.txt")
# Display the script from the session state
st.write(f'Script: \n{st.session_state.script}')
# Define the available voice options
voice_options = {
'Voice 1': 'ELEVENLABS_VOICE_1_ID',
'Voice 2': 'ELEVENLABS_VOICE_2_ID',
'Voice 3': 'ELEVENLABS_VOICE_3_ID',
'Voice 4': 'ELEVENLABS_VOICE_4_ID',
'Voice 5': 'ELEVENLABS_VOICE_5_ID',
'Voice 6': 'ELEVENLABS_VOICE_6_ID',
'Voice 7': 'ELEVENLABS_VOICE_7_ID',
'Voice 8': 'ELEVENLABS_VOICE_8_ID',
}
# Allow the user to choose a voice
selected_voice = st.selectbox("Select a voice", list(voice_options.keys()))
if st.button('Create Podcast') and st.session_state.script:
audio_files = convert_script_to_audio(st.session_state.script, st.session_state.podcast_dir)
if audio_files:
st.audio(audio_files[0], format='audio/mp3') # Use audio_files directly
with st.expander('News Summaries'):
st.write(st.session_state.research)
with st.expander('Script'):
st.write(st.session_state.title)
st.write(st.session_state.script)
| [
"Summarize the research into talking points: {research}",
"Write a witty, funny, or ironic podcast title about {topic}.",
"Write a first person editorial podcast based on a given title, research, and unique author personality. Title: {title}, Research: {news_research}, Personality: {p1_name}: {p1}. The article should start by giving an introduction to the topic and then offering an opinion based on the personality of the author. Do not use formal words like 'in conclusion' or 'however' or 'furthermore'.",
"Summarize this news story: {story}",
"Enter the podcast topic"
] |
2024-01-10 | mlfoundations/model-soups | zeroshot.py | import argparse
import os
import torch
import clip
import os
from tqdm import tqdm
import datasets
from utils import ModelWrapper, test_model_on_dataset
from openai_imagenet_template import openai_imagenet_template
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-location",
type=str,
default=os.path.expanduser('~/data'),
help="The root directory for the datasets.",
)
parser.add_argument(
"--model-location",
type=str,
default=os.path.expanduser('~/ssd/checkpoints/soups'),
help="Where to download the models.",
)
parser.add_argument(
"--batch-size",
type=int,
default=512,
)
parser.add_argument(
"--custom-template", action="store_true", default=False,
)
parser.add_argument(
"--dataset", default="ImageNet",
help=f"Must be one of {','.join(['ImageNet', 'ImageNetV2', 'ImageNetR', 'ObjectNet', 'ImageNetA'])}"
)
parser.add_argument(
"--workers",
type=int,
default=4,
)
parser.add_argument(
"--model",
default='ViT-B/32',
help='Model to use -- you can try another like ViT-L/14'
)
return parser.parse_args()
def zeroshot_classifier(model, classnames, templates, device):
print('Building zero-shot classifier.')
with torch.no_grad():
zeroshot_weights = []
for classname in tqdm(classnames):
texts = [template(classname) for template in templates] #format with class
texts = clip.tokenize(texts).to(device) #tokenize
class_embeddings = model.encode_text(texts)
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
return 100*zeroshot_weights.t()
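# Note: the tensor returned above has shape [num_classes, embed_dim] after .t(), so it can
# seed a linear classification head. A rough sketch of how it is presumably consumed
# (an assumption -- ModelWrapper's internals are not shown in this file):
#
#   image_features = base_model.encode_image(images)                      # [batch, embed_dim]
#   image_features = image_features / image_features.norm(dim=-1, keepdim=True)
#   logits = image_features @ clf.t()                                     # [batch, num_classes]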
if __name__ == '__main__':
args = parse_arguments()
DEVICE = 'cuda'
assert args.dataset in ['ImageNet', 'ImageNetV2', 'ImageNetR', 'ObjectNet', 'ImageNetA']
if args.custom_template:
template = [lambda x : f"a photo of a {x}."]
else:
template = openai_imagenet_template
base_model, preprocess = clip.load(args.model, 'cuda', jit=False)
dset = getattr(datasets, args.dataset)(preprocess, location=args.data_location, batch_size=args.batch_size, num_workers=args.workers)
clf = zeroshot_classifier(base_model, dset.classnames, template, DEVICE)
NUM_CLASSES = len(dset.classnames)
feature_dim = base_model.visual.output_dim
model = ModelWrapper(base_model, feature_dim, NUM_CLASSES, normalize=True, initial_weights=clf)
for p in model.parameters():
p.data = p.data.float()
model = model.cuda()
devices = [x for x in range(torch.cuda.device_count())]
model = torch.nn.DataParallel(model, device_ids=devices)
accuracy = test_model_on_dataset(model, dset)
print(f'Accuracy is {round(100 * accuracy, 2)}.')
| [
"[<function <lambda> at 0x116525bc0>]"
] |
2024-01-10 | mlfoundations/model-soups | finetune.py | import argparse
import os
import torch
import clip
import os
from tqdm import tqdm
import time
from timm.data.transforms_factory import transforms_imagenet_train
from datasets.imagenet import ImageNet98p, ImageNet
from utils import ModelWrapper, maybe_dictionarize_batch, cosine_lr
from zeroshot import zeroshot_classifier
from openai_imagenet_template import openai_imagenet_template
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-location",
type=str,
default=os.path.expanduser('~/data'),
help="The root directory for the datasets.",
)
parser.add_argument(
"--model-location",
type=str,
default=os.path.expanduser('~/ssd/checkpoints/soups'),
help="Where to download the models.",
)
parser.add_argument(
"--batch-size",
type=int,
default=256,
)
parser.add_argument(
"--custom-template", action="store_true", default=False,
)
parser.add_argument(
"--workers",
type=int,
default=8,
)
parser.add_argument(
"--epochs",
type=int,
default=8,
)
parser.add_argument(
"--warmup-length",
type=int,
default=500,
)
parser.add_argument(
"--lr",
type=float,
default=2e-5,
)
parser.add_argument(
"--wd",
type=float,
default=0.1,
)
parser.add_argument(
"--model",
default='ViT-B/32',
help='Model to use -- you can try another like ViT-L/14'
)
parser.add_argument(
"--name",
default='finetune_cp',
help='Filename for the checkpoints.'
)
parser.add_argument(
"--timm-aug", action="store_true", default=False,
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
DEVICE = 'cuda'
if args.custom_template:
template = [lambda x : f"a photo of a {x}."]
else:
template = openai_imagenet_template
base_model, preprocess = clip.load(args.model, 'cuda', jit=False)
# 98p is the 98% of ImageNet train set that we train on -- the other 2% is held-out val.
if args.timm_aug:
train_preprocess = transforms_imagenet_train(
img_size=base_model.visual.input_resolution,
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)
)
else:
train_preprocess = preprocess
train_dset = ImageNet98p(train_preprocess, location=args.data_location, batch_size=args.batch_size, num_workers=args.workers)
test_dset = ImageNet(preprocess, location=args.data_location, batch_size=args.batch_size, num_workers=args.workers)
clf = zeroshot_classifier(base_model, train_dset.classnames, template, DEVICE)
NUM_CLASSES = len(train_dset.classnames)
feature_dim = base_model.visual.output_dim
model = ModelWrapper(base_model, feature_dim, NUM_CLASSES, normalize=True, initial_weights=clf)
for p in model.parameters():
p.data = p.data.float()
model = model.cuda()
devices = [x for x in range(torch.cuda.device_count())]
model = torch.nn.DataParallel(model, device_ids=devices)
model_parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.AdamW(model_parameters, lr=args.lr, weight_decay=args.wd)
num_batches = len(train_dset.train_loader)
scheduler = cosine_lr(optimizer, args.lr, args.warmup_length, args.epochs * num_batches)
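# cosine_lr comes from utils and is not shown here; it is assumed to implement the usual
# linear-warmup-then-cosine-decay schedule. A hypothetical equivalent, for illustration only:
#
#   def assumed_cosine_lr(step, base_lr, warmup, total_steps):
#       if step < warmup:
#           return base_lr * (step + 1) / warmup
#       progress = (step - warmup) / max(1, total_steps - warmup)
#       return 0.5 * (1 + math.cos(math.pi * progress)) * base_lr   # requires `import math`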
loss_fn = torch.nn.CrossEntropyLoss()
model_path = os.path.join(args.model_location, f'{args.name}_0.pt')
print('Saving model to', model_path)
torch.save(model.module.state_dict(), model_path)
for epoch in range(args.epochs):
# Train
model.train()
end = time.time()
for i, batch in enumerate(train_dset.train_loader):
step = i + epoch * num_batches
scheduler(step)
optimizer.zero_grad()
batch = maybe_dictionarize_batch(batch)
inputs, labels = batch['images'].to(DEVICE), batch['labels'].to(DEVICE)
data_time = time.time() - end
logits = model(inputs)
loss = loss_fn(logits, labels)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
batch_time = time.time() - end
end = time.time()
if i % 20 == 0:
percent_complete = 100.0 * i / len(train_dset.train_loader)
print(
f"Train Epoch: {epoch} [{percent_complete:.0f}% {i}/{len(train_dset.train_loader)}]\t"
f"Loss: {loss.item():.6f}\tData (t) {data_time:.3f}\tBatch (t) {batch_time:.3f}", flush=True
)
# Evaluate
test_loader = test_dset.test_loader
model.eval()
with torch.no_grad():
print('*'*80)
print('Starting eval')
correct, count = 0.0, 0.0
pbar = tqdm(test_loader)
for batch in pbar:
batch = maybe_dictionarize_batch(batch)
inputs, labels = batch['images'].to(DEVICE), batch['labels'].to(DEVICE)
logits = model(inputs)
loss = loss_fn(logits, labels)
pred = logits.argmax(dim=1, keepdim=True)
correct += pred.eq(labels.view_as(pred)).sum().item()
count += len(logits)
pbar.set_description(
f"Val loss: {loss.item():.4f} Acc: {100*correct/count:.2f}")
top1 = correct / count
print(f'Val acc at epoch {epoch}: {100*top1:.2f}')
model_path = os.path.join(args.model_location, f'{args.name}_{epoch + 1}.pt')
print('Saving model to', model_path)
torch.save(model.module.state_dict(), model_path)
| [
"[<function <lambda> at 0x1163bfb00>]"
] |
2024-01-10 | agermanidis/OpenGPT-2 | sample~encoder.py |
"""Byte pair encoding utilities
Some functions are adapted from OpenAI but with modifications
https://github.com/openai/gpt-2
"""
import os
import json
import regex as re
from functools import lru_cache
import tensorflow as tf
import random
import numpy as np
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
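# Worked example: printable bytes map to themselves, while bytes outside `bs` are shifted
# above 255 into other printable characters. Byte 32 (space) becomes chr(256 + 32) == 'Ġ',
# which is why GPT-2-style vocabularies show 'Ġ' in front of word-initial tokens.
#
#   byte_encoder = bytes_to_unicode()
#   assert byte_encoder[ord('A')] == 'A'
#   assert byte_encoder[32] == 'Ġ'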
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
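# Worked example:
#   get_pairs(('h', 'e', 'l', 'l', 'o'))
#   -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}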
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
# self.encoder = {k: v + 1 for k, v in encoder.items()}
# self.encoder['<|padding|>'] = 0
# self.padding = 0
#
# del self.encoder['<|endoftext|>']
#
# for special_token_type in ['domain', 'date', 'authors', 'title', 'article', 'summary']:
# setattr(self, f'begin_{special_token_type}', len(self.encoder))
# self.encoder[f'<|begin{special_token_type}|>'] = len(self.encoder)
#
# setattr(self, f'end_{special_token_type}', len(self.encoder))
# self.encoder[f'<|endof{special_token_type}|>'] = len(self.encoder)
#
# # This will be used if we want to combine short articles.
# self.reset_context = len(self.encoder)
# self.encoder['<|resetcontext|>'] = len(self.encoder)
self.encoder = encoder
self.endoftext = self.encoder['<|endoftext|>']
################################## END OF SPECIAL TOKENS TO ADD
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:  # `first` not found in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def __len__(self):
return len(self.encoder)
@property
def special_tokens_onehot(self):
""" Return the IDs of all special tokens"""
return [(self.decoder[i].startswith('<|') and self.decoder[i].endswith('|>')) for i in range(len(self))]
def get_encoder():
directory_name = os.path.dirname(__file__)
with open(os.path.join(directory_name, 'encoder.json'), 'r') as f:
encoder = json.load(f)
with open(os.path.join(directory_name, 'vocab.bpe'), 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
##############################################################
# TURN SOMETHING INTO THE RIGHT FORMAT FOR AN EXAMPLE
##############################################################
def _tokenize_article_pieces(encoder, item):
"""
Turn the article into tokens
NOTE: in hindsight I kinda messed up here because the first token is always represented as a BPE continuation
rather than an initial token in its own right. whoops....
:param item: Contains things that need to be tokenized
fields are ['domain', 'date', 'authors', 'title', 'article', 'summary']
:return: dict
"""
# article_pieces = {
# 'article': [encoder.begin_article] + encoder.encode(item['text']) + [encoder.end_article],
# 'domain': [encoder.begin_domain] + encoder.encode(item['domain']) + [encoder.end_domain],
# 'title': [encoder.begin_title] + encoder.encode(item['title']) + [encoder.end_title],
# }
# # 4/6: Attach the summary too, why the hell not
# if item['summary'] and len(item['summary']) > 50:
# article_pieces['summary'] = [encoder.begin_summary] + encoder.encode(item['summary']) + [encoder.end_summary]
#
# # 5/6: date
# date_split = item['publish_date'].split('-')
# assert len(date_split) == 3
# assert date_split[0].isdigit()
#
# date_txt = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
# 'August', 'September', 'October', 'November', 'December'][int(date_split[0]) - 1] + ' {}, {}'.format(
# date_split[1], date_split[2])
# article_pieces['date'] = [encoder.begin_date] + encoder.encode(date_txt) + [encoder.end_date]
#
# # 6/6: authors
# authors = ', '.join(item['authors'])
# if len(authors) > 5:
# article_pieces['authors'] = [encoder.begin_authors] + encoder.encode(authors) + [encoder.end_authors]
return encoder.encode(item) + [encoder.endoftext]
def _cut_tokens_to_add_stuff(tokens, stuff_to_add, desired_size, padding_token):
"""
The idea behind this function is to take away tokens from `tokens' such that tokens[:LENGTH] + stuff_to_add becomes
exactly at the right size (desired_size).
:param tokens:
:param stuff_to_add:
:param desired_size:
:return:
"""
if len(tokens) >= desired_size:
return tokens
# no way we can add this stuff
if len(stuff_to_add) >= desired_size:
return tokens
if (len(tokens) + len(stuff_to_add)) <= desired_size:
return tokens + stuff_to_add
# Otherwise we'll have to actually cut
tokens = tokens[:(desired_size - len(stuff_to_add) - 1)]
tokens.append(padding_token)
tokens.extend(stuff_to_add)
return tokens
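# Worked example of the cut above: with desired_size=10, len(tokens)=8 and len(stuff_to_add)=5,
# tokens is trimmed to 10 - 5 - 1 = 4 items, one padding token is appended, and the 5 tokens of
# stuff_to_add follow, giving exactly 10 tokens in total.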
def tokenize_for_grover_training(encoder, item, desired_size=1024, unconditional_prob=0.35, metadata_dropout_prob=0.1,
cut_prob=0.2):
"""
Not only will we tokenize an item with a BPE encoder, but we'll also put it in a nice format for language modeling.
The goal is to MINIMIZE PADDING. If we don't fill up the desired size of 1024 tokens then we're wasting compute.
The canonical order is
DOMAIN DATE AUTHORS TITLE ARTICLE SUMMARY
:param encoder:
:param item: Contains things like
{"url": "https://www.advocate.com/node/1010911",
"timestamp": "20180118211607",
"url_used": "https://web.archive.org/web/20180118211607id_/https://www.advocate.com/node/1010911",
"domain": "advocate.com",
"title": "Report: One-Third of Trump's Judicial Picks Are Anti-LGBT",
"text": ....
"summary": ....
"authors": list
"publish_date": ...
}
:param desired_size: the goal for how long the span will be
:param unconditional_prob: The probability that we will generate JUST THE TEXT first.
:param metadata_dropout_prob: The probability that we will drop out each item of metadata
:param cut_prob: The probability that, if we're already over the desired size, we'll cut the article and start
predicting metadata before the desired_size window ends.
:return:
"""
# Get all the bits and pieces
tokens = _tokenize_article_pieces(encoder, item)
# canonical_metadata_order = ['domain', 'date', 'authors', 'title']
#
# # unconditional_prob is probability we only generate the text first, without any metadata
# switch = random.random()
# if switch < unconditional_prob:
# assignments = {'article': 'a'}
# chunk_a = article_pieces.pop('article')
# chunk_b = []
# for x in canonical_metadata_order + ['summary']:
# if random.random() > metadata_dropout_prob:
# chunk_b.extend(article_pieces.pop(x, []))
# assignments[x] = 'b'
# elif switch < 0.5:
# # Put everything in chunk_a, without dropout
# assignments = {}
# chunk_a = []
# chunk_b = []
# for x in canonical_metadata_order + ['article', 'summary']:
# chunk_a.extend(article_pieces.pop(x, []))
# assignments[x] = 'a'
# else:
# assignments = {}
# chunk_a = []
# chunk_b = []
# for k in canonical_metadata_order + ['article', 'summary']:
# if random.random() < metadata_dropout_prob and k not in ('article', 'title'):
# pass
# elif random.random() < 0.5:
# if k != 'summary':
# chunk_a.extend(article_pieces.pop(k, []))
# assignments[k] = 'a'
# else:
# chunk_b.extend(article_pieces.pop(k, []))
# assignments[k] = 'b'
#
# if (len(chunk_a) + len(chunk_b)) <= desired_size:
# return chunk_a + chunk_b
#
# if (assignments.get('article', '') == 'a') and (len(chunk_b) > 0) and (random.random() < cut_prob):
# return _cut_tokens_to_add_stuff(chunk_a, chunk_b, desired_size, encoder.padding)
#
# tokens = chunk_a + chunk_b
return tokens
def detokenize(encoder, tokens):
return encoder.decode(tokens)
#######################################
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def sliding_window(article, max_seq_length):
"""
Randomly sample some spans. It's a simple approximation of sliding window
:param tokens:
:param max_seq_length:
:return:
"""
# if it's shorter, no need for this
if len(article['input_ids']) <= max_seq_length:
amount_to_pad = max_seq_length - len(article['input_ids'])
yield article
return
num_spans = len(article['input_ids']) - max_seq_length + 1
weights = np.ones(num_spans, dtype=np.float32)
# weights[0] = max_seq_length
weights /= weights.sum()
num_to_yield = int(0.5 + len(article['input_ids']) / max_seq_length)
starts = np.random.choice(num_spans, size=num_to_yield, replace=False, p=weights)
input_ids = article.pop('input_ids')
for i in starts.tolist():
article['input_ids'] = input_ids[i:(i + max_seq_length)]
yield article
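# Worked example: with max_seq_length=1024 and an article of 2500 input_ids,
# num_spans = 2500 - 1024 + 1 = 1477 candidate windows and
# num_to_yield = int(0.5 + 2500 / 1024) = 2, so two random 1024-token spans are yielded.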
# def sliding_window(article, max_seq_length, pad_token):
# """
# Randomly sample some spans. It's a simple approximation of sliding window
# :param tokens:
# :param max_seq_length:
# :return:
# """
# # if it's shorter, no need for this
# if len(article['input_ids']) <= max_seq_length:
# amount_to_pad = max_seq_length - len(article['input_ids'])
# article['input_ids'].extend([pad_token] * amount_to_pad)
# yield article
# return
#
# num_spans = len(article['input_ids']) - max_seq_length + 1
# weights = np.ones(num_spans, dtype=np.float32)
# # weights[0] = max_seq_length
# weights /= weights.sum()
#
# num_to_yield = int(0.5 + len(article['input_ids']) / max_seq_length)
# starts = np.random.choice(num_spans, size=num_to_yield, replace=False, p=weights)
#
# input_ids = article.pop('input_ids')
# for i in starts.tolist():
# article['input_ids'] = input_ids[i:(i + max_seq_length)]
# yield article
def format_context(encoder, news_article, target):
"""
Generates a news article given some partial information
:param news_article: Contains context
:param target: What we want to get an answer for.
:return:
"""
canonical_metadata_order = ['domain', 'date', 'authors', 'title', 'article']
tokens = []
for metadata_category in canonical_metadata_order:
metadata = news_article.get(metadata_category, '').strip()
# This MIGHT BE needed because I think during training time we never saw empty articles
# if metadata or ((metadata_category == 'article') and target != 'article'):
if (metadata_category == 'article') and (target != 'article'):
metadata = news_article.get('title', '') # Just copy from the title maybe?
if metadata:
tokens.append(encoder.__dict__[f'begin_{metadata_category}'])
tokens.extend(encoder.encode(metadata))
tokens.append(encoder.__dict__[f'end_{metadata_category}'])
assert target in (canonical_metadata_order + ['summary'])
tokens.append(encoder.__dict__[f'begin_{target}'])
return tokens
def extract_generated_target(output_tokens, encoder, target):
"""
Given some tokens that were generated, extract the target
:param output_tokens: [num_tokens] thing that was generated
:param encoder: how they were encoded
:param target: the piece of metadata we wanted to generate!
:return:
"""
# Filter out first instance of start token
assert output_tokens.ndim == 1
start_ind = 0
end_ind = output_tokens.shape[0]
return {
'extraction': encoder.decode(output_tokens[start_ind:end_ind]),
'start_ind': start_ind,
'end_ind': end_ind,
}
# def extract_generated_target(output_tokens, encoder, target):
# """
# Given some tokens that were generated, extract the target
# :param output_tokens: [num_tokens] thing that was generated
# :param encoder: how they were encoded
# :param target: the piece of metadata we wanted to generate!
# :return:
# """
# # Filter out first instance of start token
# assert output_tokens.ndim == 1
#
# start_tokens = output_tokens == encoder.__dict__[f'begin_{target}']
# if np.any(start_tokens):
# start_ind = np.argmax(start_tokens) + 1
# else:
# start_ind = 0
#
# end_tokens = output_tokens == encoder.__dict__[f'end_{target}']
# if np.any(end_tokens):
# end_ind = np.argmax(end_tokens)
# else:
# end_ind = output_tokens.shape[0]
#
# return {
# 'extraction': encoder.decode(output_tokens[start_ind:end_ind]),
# 'start_ind': start_ind,
# 'end_ind': end_ind,
# }
if __name__ == '__main__':
encoder = get_encoder()
print("VOCAB SIZE IS {}".format(len(encoder.encoder)))
| [] |
2024-01-10 | jakderrida/AgentGPT | platform~reworkd_platform~web~api~agent~memory~memory.py | from abc import ABC
from typing import List, Tuple
import weaviate
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
from reworkd_platform.settings import settings
SimilarText = List[Tuple[str, float]]
# Base class for AgentMemory
# Ensure we use __enter__ and __exit__ so that connections are closed
class AgentMemory(ABC):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback) -> None:
pass
def add_task(self, task: str, result: str) -> None:
pass
def get_similar_tasks(
self, query: str, similarity_threshold: float = 0.7
) -> SimilarText:
pass
class WeaviateMemory(AgentMemory):
db: Weaviate = None
def __init__(self, index_name: str):
self.index_name = index_name
self.text_key = "agentGPT_Tasks"
self.client = weaviate.Client(settings.vector_db_url)
def __enter__(self):
self.db = Weaviate(
self.client,
self.index_name,
self.text_key,
embedding=OpenAIEmbeddings(openai_api_key=settings.openai_api_key),
by_text=False,
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.client.__del__()
def add_task(self, task: str, result: str) -> None:
self.db.add_texts([task], [{"result": result}])
def get_similar_tasks(
self, query: str, similarity_threshold: float = 0.7
) -> SimilarText:
# Get similar tasks
results = self.db.similarity_search_with_score(query)
# Sort by score
results.sort(key=lambda x: x[1], reverse=True)
# Return
return [
(text.page_content, score)
for [text, score] in results
if score > similarity_threshold
]
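# Minimal usage sketch (assumes a reachable Weaviate instance at settings.vector_db_url
# and a valid OpenAI key in settings; "agent_123" is a made-up index name):
#
#   with WeaviateMemory("agent_123") as memory:
#       memory.add_task("Research competitors", "Found three main competitors ...")
#       for text, score in memory.get_similar_tasks("competitor analysis"):
#           print(score, text)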
| [] |
2024-01-10 | hiyoit/chatgpt-on-wechat | bot~chatgpt~chat_gpt_bot.py | # encoding:utf-8
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import Session, SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import conf, load_config
from common.log import logger
from common.token_bucket import TokenBucket
from common.expired_dict import ExpiredDict
import openai
import openai.error
import time
# OpenAI chat model API (working)
class ChatGPTBot(Bot, OpenAIImage):
def __init__(self):
super().__init__()
# set the default api_key
openai.api_key = conf().get('open_ai_api_key')
if conf().get('open_ai_api_base'):
openai.api_base = conf().get('open_ai_api_base')
proxy = conf().get('proxy')
if proxy:
openai.proxy = proxy
if conf().get('rate_limit_chatgpt'):
self.tb4chatgpt = TokenBucket(conf().get('rate_limit_chatgpt', 20))
self.sessions = SessionManager(ChatGPTSession, model= conf().get("model") or "gpt-3.5-turbo")
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[CHATGPT] query={}".format(query))
session_id = context['session_id']
reply = None
clear_memory_commands = conf().get('clear_memory_commands', ['#清除记忆'])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, '记忆已清除')
elif query == '#清除所有':
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, '所有人记忆已清除')
elif query == '#更新配置':
load_config()
reply = Reply(ReplyType.INFO, '配置已更新')
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[CHATGPT] session query={}".format(session.messages))
api_key = context.get('openai_api_key')
# if context.get('stream'):
# # reply in stream
# return self.reply_text_stream(query, new_query, session_id)
reply_content = self.reply_text(session, session_id, api_key, 0)
logger.debug("[CHATGPT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(session.messages, session_id, reply_content["content"], reply_content["completion_tokens"]))
if reply_content['completion_tokens'] == 0 and len(reply_content['content']) > 0:
reply = Reply(ReplyType.ERROR, reply_content['content'])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content['content'])
logger.debug("[CHATGPT] reply {} used 0 tokens.".format(reply_content))
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
else:
reply = Reply(ReplyType.ERROR, 'Bot不支持处理{}类型的消息'.format(context.type))
return reply
def compose_args(self):
return {
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称
"temperature":conf().get('temperature', 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p":1,
"frequency_penalty":conf().get('frequency_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty":conf().get('presence_penalty', 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get('request_timeout', 60), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get('request_timeout', 120), #重试超时时间,在这个时间内,将会自动重试
}
def reply_text(self, session:ChatGPTSession, session_id, api_key, retry_count=0) -> dict:
'''
call openai's ChatCompletion to get the answer
:param session: a conversation session
:param session_id: session id
:param retry_count: retry count
:return: {}
'''
try:
if conf().get('rate_limit_chatgpt') and not self.tb4chatgpt.get_token():
raise openai.error.RateLimitError("RateLimitError: rate limit exceeded")
# if api_key == None, the default openai.api_key will be used
response = openai.ChatCompletion.create(
api_key=api_key, messages=session.messages, **self.compose_args()
)
# logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return {"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response.choices[0]['message']['content']}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, openai.error.RateLimitError):
logger.warn("[CHATGPT] RateLimitError: {}".format(e))
result['content'] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(5)
elif isinstance(e, openai.error.Timeout):
logger.warn("[CHATGPT] Timeout: {}".format(e))
result['content'] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif isinstance(e, openai.error.APIConnectionError):
logger.warn("[CHATGPT] APIConnectionError: {}".format(e))
need_retry = False
result['content'] = "我连接不到你的网络"
else:
logger.warn("[CHATGPT] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session_id)
if need_retry:
logger.warn("[CHATGPT] 第{}次重试".format(retry_count+1))
return self.reply_text(session, session_id, api_key, retry_count+1)
else:
return result
class AzureChatGPTBot(ChatGPTBot):
def __init__(self):
super().__init__()
openai.api_type = "azure"
openai.api_version = "2023-03-15-preview"
def compose_args(self):
args = super().compose_args()
args["engine"] = args["model"]
del(args["model"])
return args | [
"content",
"我现在有点累了,等会再来吧"
] |
2024-01-10 | KMLEE1989/Study | NLP~NLP6.py | from operator import index
import pyLDAvis
from sklearn import datasets
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))
documents = dataset.data
print(len(documents))
print(documents[3])
# 11314
#[0] Well i'm not sure about the story nad it did seem biased. What
# I disagree with is your statement that the U.S. Media is out to
# ruin Israels reputation. That is rediculous. The U.S. media is
# the most pro-israeli media in the world. Having lived in Europe
# I realize that incidences such as the one described in the
# letter have occured. The U.S. media as a whole seem to try to
# ignore them. The U.S. is subsidizing Israels existance and the
# Europeans are not (at least not to the same degree). So I think
# that might be a reason they report more clearly on the
# atrocities.
# What is a shame is that in Austria, daily reports of
# the inhuman acts commited by Israeli soldiers and the blessing
# received from the Government makes some of the Holocaust guilt
# go away. After all, look how the Jews are treating other races
# when they got power. It is unfortunate.
# [3]
# Notwithstanding all the legitimate fuss about this proposal, how much
# of a change is it? ATT's last product in this area (a) was priced over
# $1000, as I suspect 'clipper' phones will be; (b) came to the customer
# with the key automatically preregistered with government authorities. Thus,
# aside from attempting to further legitimize and solidify the fed's posture,
# Clipper seems to be "more of the same", rather than a new direction.
# Yes, technology will eventually drive the cost down and thereby promote
# more widespread use- but at present, the man on the street is not going
# to purchase a $1000 crypto telephone, especially when the guy on the other
# end probably doesn't have one anyway. Am I missing something?
# The real question is what the gov will do in a year or two when air-
# tight voice privacy on a phone line is as close as your nearest pc. That
# has got to a problematic scenario for them, even if the extent of usage
# never surpasses the 'underground' stature of PGP.
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from gensim.parsing.preprocessing import preprocess_string
nltk.download('stopwords')
def clean_text(d):
pattern = r'[^a-zA-Z\s]'
text = re.sub(pattern, '', d)
return text
def clean_stopword(d):
stop_words = stopwords.words('english')
return ' '.join([w.lower() for w in d.split() if w.lower() not in stop_words and len(w) > 3])
def preprocessing(d):
return preprocess_string(d)
import pandas as pd
news_df = pd.DataFrame({'article':documents})
# print(len(news_df))
# 11314
news_df.replace("", float("NaN"), inplace=True)
# print(news_df.isnull().values.any())
# True
news_df.replace("", float("NaN"), inplace=True)
news_df.dropna(inplace=True)
# print(len(news_df))
# 11096
news_df['article'] = news_df['article'].apply(clean_text)
print(news_df['article'])
# 0 Well i'm not sure about the story nad it did s...
# 1 \n\n\n\n\n\n\nYeah, do you expect people to re...
# 2 Although I realize that principle is not one o...
# 3 Notwithstanding all the legitimate fuss about ...
# 4 Well, I will have to change the scoring on my ...
# ...
# 11309 Danny Rubenstein, an Israeli journalist, will ...
# 11310 \n
# 11311 \nI agree. Home runs off Clemens are always m...
# 11312 I used HP DeskJet with Orange Micros Grappler ...
# 11313 ^^^^^^\n...
# Name: article, Length: 11096, dtype: object
news_df['article'] = news_df['article'].apply(clean_stopword)
print(news_df['article'])
# 0 well sure story seem biased. what disagree sta...
# 1 yeah, expect people read faq, etc. actually ac...
# 2 although realize principle strongest points, w...
# 3 notwithstanding legitimate fuss proposal, much...
# 4 well, change scoring playoff pool. unfortunate...
# ...
# 11309 danny rubenstein, israeli journalist, speaking...
# 11310
# 11311 agree. home runs clemens always memorable. kin...
# 11312 used deskjet orange micros grappler system6.0....
# 11313 ^^^^^^ argument murphy. scared hell came last ...
# Name: article, Length: 11096, dtype: object
tokenized_news = news_df['article'].apply(preprocessing)
tokenized_news = tokenized_news.to_list()
# print(tokenized_news)
import numpy as np
drop_news = [index for index, sentence in enumerate(tokenized_news) if len(sentence) <= 1]
news_texts = np.delete(tokenized_news, drop_news, axis=0)
# print(len(news_texts))
# 10936
from gensim import corpora
dictionary = corpora.Dictionary(news_texts)
corpus = [dictionary.doc2bow(text) for text in news_texts]
# print(corpus[1])
# [(51, 1), (52, 1), (53, 1), (54, 1), (55, 1), (56, 1), (57, 1), (58, 2), (59, 1), (60, 1), (61, 1), (62, 2), (63, 1), (64, 1), (65, 1), (66, 1), (67, 1),
# (68, 2), (69, 3), (70, 1), (71, 1), (72, 1), (73, 1), (74, 1), (75, 2), (76, 1), (77, 1), (78, 1), (79, 1), (80, 1), (81, 1), (82, 2), (83, 1), (84, 1), (85, 1), (86, 1)]
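# Each entry in `corpus` is a bag-of-words document: a list of (token_id, count) pairs,
# where token_id indexes into `dictionary` (e.g. dictionary[51] recovers the word behind id 51).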
from gensim.models import LsiModel
# lsi_model = LsiModel(corpus, num_topics=20, id2word=dictionary)
# topics = lsi_model.print_topics()
# print(topics)
from gensim.models.coherencemodel import CoherenceModel
# min_topics, max_topics = 20, 25
# coherence_scores = []
# for num_topics in range(min_topics, max_topics):
# model = LsiModel(corpus, num_topics=num_topics, id2word=dictionary)
# coherence = CoherenceModel(model=model, texts=news_texts, dictionary=dictionary)
# coherence_scores.append(coherence.get_coherence())
# print(coherence_scores)
# import matplotlib.pyplot as plt
# plt.style.use('seaborn-white')
# x=[int(i) for i in range(min_topics, max_topics)]
# plt.figure(figsize=(10,6))
# plt.plot(x, coherence_scores)
# plt.xlabel('Number of Topics')
# plt.ylabel('Coherence Scores')
# plt.show()
# lsi_model = LsiModel(corpus, num_topics=24, id2word=dictionary)
# topics = lsi_model.print_topic(num_topics=24)
# print(topics)
from gensim.models import LdaModel
# lda_model = LdaModel(corpus, num_topics=20, id2word=dictionary)
# topics = lda_model.print_topics()
# print(topics)
from gensim.models.coherencemodel import CoherenceModel
# min_topics, max_topics = 20, 25
# coherence_scores = []
# for num_topics in range(min_topics, max_topics):
# model = LdaModel(corpus, num_topics=num_topics, id2word=dictionary)
# coherence = CoherenceModel(model=model, texts=news_texts, dictionary=dictionary)
# coherence_scores.append(coherence.get_coherence())
# print(coherence_scores)
# import matplotlib.pyplot as plt
# plt.style.use('seaborn-white')
# x=[int(i) for i in range(min_topics, max_topics)]
# plt.figure(figsize=(10,6))
# plt.plot(x, coherence_scores)
# plt.xlabel('Number of Topics')
# plt.ylabel('Coherence Scores')
# plt.show()
################################################################################################
# lda_model = LdaModel(corpus, num_topics=23, id2word=dictionary)
# topics = lda_model.print_topics(num_topics=23)
# print(topics)
# import pyLDAvis.gensim_models
# pyLDAvis.enable_notebook()
# vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, dictionary)
# pyLDAvis.display(vis)
# pandas pyLDAvis ====> version issue It is not working here need more research
#####################################################################################################
| [] |
2024-01-10 | posix4e/puppet | backend~backend.py | import json
import uuid
from datetime import datetime
import mistune
import openai
from dotenv import load_dotenv
from easycompletion import openai_text_call
from fastapi import FastAPI, HTTPException
from fastapi import Request
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.testclient import TestClient
from gradio import Interface, TabbedInterface, components, mount_gradio_app
from pydantic import BaseModel
from pygments import highlight
from pygments.formatters import html
from pygments.lexers import get_lexer_by_name
from sqlalchemy import JSON, Column, Integer, String, create_engine
from fastapi.responses import FileResponse, JSONResponse
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from uvicorn import Config, Server
LANGS = [
"gpt-3.5-turbo",
"gpt-4",
]
Base = declarative_base()
class User(Base):
__tablename__ = "user_data"
id = Column(Integer, primary_key=True, autoincrement=True)
uid = Column(String, nullable=False)
openai_key = Column(String, unique=True, nullable=False)
def __repr__(self):
return f"User(id={self.id}, uid={self.uid}"
class AndroidHistory(Base):
__tablename__ = "android_history"
id = Column(Integer, primary_key=True, autoincrement=True)
uid = Column(String, nullable=False)
question = Column(String, nullable=False)
answer = Column(String, nullable=False)
def __repr__(self):
return f"AndroidHistory(question={self.question}, answer={self.answer}"
class BrowserHistory(Base):
__tablename__ = "browser_history"
id = Column(Integer, primary_key=True, autoincrement=True)
machineid = Column(String, nullable=False)
uid = Column(String, nullable=False)
url = Column(String, nullable=False)
def __repr__(self):
return f"BrowserHistory(machineid={self.machineid}, url={self.url}"
# Add a new table to store the commands
class Command(Base):
__tablename__ = "commands"
id = Column(Integer, primary_key=True, autoincrement=True)
uid = Column(String, nullable=False)
command = Column(String, nullable=False)
status = Column(String, nullable=False, default="queued")
def __repr__(self):
return f"self.command"
engine = create_engine("sqlite:///puppet.db")
Base.metadata.create_all(bind=engine)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
load_dotenv()
app = FastAPI(debug=True)
app.add_middleware(GZipMiddleware, minimum_size=1000)
class RegisterItem(BaseModel):
openai_key: str
class CommandItem(BaseModel):
uid: str
command: str
class EventItem(BaseModel):
uid: str
event: str
class AssistItem(BaseModel):
uid: str
prompt: str
version: str
class SaveURLItem(BaseModel):
uid: str
machineid: str
url: str
@app.post("/add_command")
async def add_command(item: CommandItem):
db: Session = SessionLocal()
new_command = Command(uid=item.uid, command=item.command)
db.add(new_command)
db.commit()
db.refresh(new_command)
return {"message": "Command added"}
@app.post("/send_event")
async def send_event(item: EventItem):
print(f"Received event from {item.uid}:\n{item.event}")
with open(f"{item.uid}_events.txt", "a") as f:
f.write(f"{datetime.now()} - {item.event}\n")
db: Session = SessionLocal()
user = db.query(User).filter(User.uid == item.uid).first()
if not user:
raise HTTPException(status_code=400, detail="Invalid uid")
# Update the last time send_event was called and increment the number of events
user.last_event = datetime.now()
db.commit()
# Get all the queued commands for this user
commands = (
db.query(Command)
.filter(Command.uid == item.uid, Command.status == "queued")
.all()
)
for command in commands:
command.status = "running"
db.commit()
return {
"message": "Event received",
"commands": [command.command for command in commands],
}
@app.post("/register")
async def register(item: RegisterItem):
db: Session = SessionLocal()
existing_user = db.query(User).filter(User.openai_key == item.openai_key).first()
if existing_user:
return {"uid": existing_user.uid} # return existing UUID
else:
new_user = User(uid=str(uuid.uuid4()), openai_key=item.openai_key)
db.add(new_user)
db.commit()
db.refresh(new_user)
return {"uid": new_user.uid}
@app.post("/assist")
async def assist(item: AssistItem):
db: Session = SessionLocal()
user = db.query(User).filter(User.uid == item.uid).first()
if not user:
raise HTTPException(status_code=400, detail="Invalid uid")
# Call OpenAI
openai.api_key = user.openai_key
response = openai_text_call(item.prompt, model=item.version)
# Update the last time assist was called
user.last_assist = datetime.now()
# Store the history
new_history = AndroidHistory(
uid=item.uid, question=item.prompt, answer=response["text"]
)
db.add(new_history)
db.commit()
return response
@app.get("/get_history/{uid}")
async def get_history(uid: str):
db: Session = SessionLocal()
history = db.query(BrowserHistory).filter(BrowserHistory.uid == uid).all()
browser_history = db.query(AndroidHistory).filter(AndroidHistory.uid == uid).all()
commands = db.query(Command).filter(Command.uid == uid).all()
try:
with open(f"{uid}_events.txt", "r") as f:
events = f.read().split(",")
except FileNotFoundError:
events = ""
return {
"events": events,
"history": [h.__dict__ for h in history],
"browser_history": [h.__dict__ for h in browser_history],
"commands": [c.__dict__ for c in commands],
}
@app.post("/saveurl")
async def saveurl(item: SaveURLItem):
db: Session = SessionLocal()
new_browser_history = BrowserHistory(
uid=item.uid, machineid=item.machineid, url=item.url
)
db.add(new_browser_history)
db.commit()
db.refresh(new_browser_history)
return {"message": "Browser history saved"}
def assist_interface(uid, prompt, gpt_version):
client = TestClient(app)
response = client.post(
"/assist",
json={"uid": uid, "prompt": prompt, "version": gpt_version},
)
return generate_html_response_from_openai(response.text)
def get_user_interface(uid):
db: Session = SessionLocal()
user = db.query(User).filter(User.uid == uid).first()
if not user:
return {"message": "No user with this uid found"}
return str(user)
class HighlightRenderer(mistune.HTMLRenderer):
def block_code(self, code, info=None):
if info:
lexer = get_lexer_by_name(info, stripall=True)
formatter = html.HtmlFormatter()
return highlight(code, lexer, formatter)
return "<pre><code>" + mistune.escape(code) + "</code></pre>"
def generate_html_response_from_openai(openai_response):
r"""
This is used by the gradio to extract all of the user
data and write it out as a giant json blob that can be easily diplayed.
>>>
>>> data = {'text': 'This is a test'}
>>> generate_html_response_from_openai(json.dumps(data))
'<html><p>This is a test</p>\n</html>'
"""
openai_response = json.loads(openai_response)
openai_response = openai_response["text"]
markdown = mistune.create_markdown(renderer=HighlightRenderer())
openai_response = markdown(openai_response)
return f"<html>{openai_response}</html>"
def get_assist_interface():
gpt_version_dropdown = components.Dropdown(label="GPT Version", choices=LANGS)
return Interface(
fn=assist_interface,
inputs=[
components.Textbox(label="UID", type="text"),
components.Textbox(label="Prompt", type="text"),
gpt_version_dropdown,
],
outputs="html",
title="OpenAI Text Generation",
description="Generate text using OpenAI's GPT-4 model.",
)
def get_db_interface():
return Interface(
fn=get_user_interface,
inputs="text",
outputs="text",
title="Get User Details",
description="Get user details from the database",
)
## The register interface uses this weird syntax to make sure we don't copy and
## paste quotes in the uid when we output it
def register_interface(openai_key):
client = TestClient(app)
response = client.post(
"/register",
json={"openai_key": openai_key},
)
return response.json()
def get_register_interface():
def wrapper(openai_key):
result = register_interface(openai_key)
return f"""<p id='uid'>{result["uid"]}</p>
<button onclick="navigator.clipboard.writeText(document.getElementById('uid').innerText)">
Copy to clipboard
</button>"""
return Interface(
fn=wrapper,
inputs=[components.Textbox(label="OpenAI Key", type="text")],
outputs=components.HTML(),
title="Register New User",
description="Register a new user by entering an OpenAI key.",
)
def get_history_interface(uid):
client = TestClient(app)
response = client.get(f"/get_history/{uid}")
return response.json()
def get_history_gradio_interface():
return Interface(
fn=get_history_interface,
inputs=[components.Textbox(label="UID", type="text")],
outputs="json",
title="Get User History",
description="Get the history of questions and answers for a given user.",
)
def add_command_interface(uid, command):
client = TestClient(app)
response = client.post(
"/add_command",
json={"uid": uid, "command": command},
)
return response.json()
@app.get("/.well-known/ai-plugin.json")
async def plugin_manifest(request: Request):
host = request.headers["host"]
with open(".well-known/ai-plugin.json") as f:
text = f.read().replace("PLUGIN_HOSTNAME", "https://posix4e-puppet.hf.space/")
return JSONResponse(content=json.loads(text))
@app.get("/openapi.yaml")
async def openai_yaml(request: Request):
host = request.headers["host"]
with open(".well-known/openapi.yaml") as f:
text = f.read().replace("PLUGIN_HOSTNAME", "https://posix4e-puppet.hf.space/")
return JSONResponse(content=json.loads(text))
@app.get("/detectcommand/{command}")
async def get_command(command: str, item: AssistItem):
db: Session = SessionLocal()
user = db.query(User).filter(User.uid == item.uid).first()
if not user:
raise HTTPException(status_code=400, detail="Invalid uid")
openai.api_key = user.openai_key
response = openai_text_call(item.prompt, model=item.version)
return JSONResponse(content=response, status_code=200)
@app.get("/logo.png")
async def plugin_logo():
return FileResponse("/.well-known/logo.jpeg")
def get_add_command_interface():
return Interface(
fn=add_command_interface,
inputs=[
components.Textbox(label="UID", type="text"),
components.Textbox(label="Command", type="text"),
],
outputs="json",
title="Add Command",
description="Add a new command for a given user.",
)
app = mount_gradio_app(
app,
TabbedInterface(
[
get_assist_interface(),
get_db_interface(),
get_register_interface(),
get_history_gradio_interface(),
get_add_command_interface(),
]
),
path="/",
)
if __name__ == "__main__":
config = Config("backend:app", host="0.0.0.0", port=7860, reload=True)
server = Server(config)
server.run()
| [] |
2024-01-10 | danbeaumont95/mma-fight-predictor-backend | mma_fight_predictor~mma_fight_predictor_api~Events~events.py | import requests
from bs4 import BeautifulSoup
from rest_framework.views import APIView
from ..helpers.helpers import return_response, get_soup_from_url, compare_fractions, get_fighters_fighting_style, get_fighters_record_again_each_opponents_fight_style_using_url, find_max, get_fighters_fighting_stance, get_all_fights_in_event, get_basic_fight_stats_from_event, get_fighters_wins_if_in_top_10, get_upcoming_events, get_fighters_record_again_each_opponents_fight_style_using_db
from rest_framework import status
from rest_framework.decorators import api_view
import pandas as pd
import openai
import os
import time
from dotenv import load_dotenv
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
load_dotenv()
class EventsList(APIView):
def get(self, request):
events = get_upcoming_events()
# dan: don't need below
# get_fighters_wins_if_in_top_10('conor mcgregor')
return return_response(events, 'Success', status.HTTP_200_OK)
@api_view(['POST'])
def get_event_by_id(request):
# url = f'http://ufcstats.com/event-details/3c6976f8182d9527'
url = request.data.get('event_url')
fights_data = get_all_fights_in_event(url)
return return_response(fights_data, 'Success', status.HTTP_200_OK)
@api_view(['POST'])
def get_basic_fight_stats(request):
url = request.data.get('fight_url')
fight_date = request.data.get('fight_date')
fight_stats = get_basic_fight_stats_from_event(url)
return return_response(fight_stats, 'Success', status.HTTP_200_OK)
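# get_in_depth_stats (below) compares two fighters by fighting style and stance: it
# scrapes each fighter's ufcstats.com profile page, pulls their record against each
# opponent style/stance from the local DB helpers, and, when the two have fought
# each other before, also summarises the head-to-head record.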
@api_view(['GET'])
def get_in_depth_stats(request):
fighter_1 = request.GET.get('fighter_1')
fighter_2 = request.GET.get('fighter_2')
fighter_1_style = get_fighters_fighting_style(fighter_1)
fighter_2_style = get_fighters_fighting_style(fighter_2)
fighter_1_stance = get_fighters_fighting_stance(fighter_1)
fighter_1_stance = fighter_1_stance.lower()
fighter_2_stance = get_fighters_fighting_stance(fighter_2)
fighter_2_stance = fighter_2_stance.lower()
fighter_1_names_list = fighter_1.split()
fighter_1_stats_search_url = f'http://ufcstats.com/statistics/fighters/search?query={fighter_1_names_list[0]}'
fighter_1_search_soup = get_soup_from_url(fighter_1_stats_search_url)
fighter_1_a_tag = fighter_1_search_soup.find('a', text=re.compile(fighter_1_names_list[1].lower(), re.I))
fighter_1_stats_url_href = fighter_1_a_tag.get('href')
fighter_2_names_list = fighter_2.split()
fighter_2_stats_search_url = f'http://ufcstats.com/statistics/fighters/search?query={fighter_2_names_list[0]}&page=all'
fighter_2_search_soup = get_soup_from_url(fighter_2_stats_search_url)
fighter_2_a_tag = fighter_2_search_soup.find('a', text=re.compile(fighter_2_names_list[1].lower(), re.I))
fighter_2_stats_url_href = fighter_2_a_tag.get('href')
# fighter_1_data = get_fighters_record_again_each_opponents_fight_style_using_url(fighter_1, fighter_1_stats_url_href)  # Legacy but gets up-to-date stats; the db currently doesn't have fights past 2023-08-12
fighter_1_data = get_fighters_record_again_each_opponents_fight_style_using_db(fighter_1)
fighter_2_data = get_fighters_record_again_each_opponents_fight_style_using_db(fighter_2)
# fighter_2_data = get_fighters_record_again_each_opponents_fight_style_using_url(fighter_2, fighter_2_stats_url_href)  # Legacy but gets up-to-date stats; the db currently doesn't have fights past 2023-08-12
all_fighter_1_opponents = fighter_1_data['opponents']
all_fighter_1_opponents= [item.lower() for item in all_fighter_1_opponents]
all_fighter_2_opponents = fighter_2_data['opponents']
all_fighter_2_opponents = [item.lower() for item in all_fighter_2_opponents]
all_fighter_1_results_against_all_opponents = fighter_1_data['result_against_opponents']
all_fighter_2_results_against_all_opponents = fighter_2_data['result_against_opponents']
fighter_1_record_agains_each_opponent_style = fighter_1_data['record']
fighter_2_record_agains_each_opponent_style = fighter_2_data['record']
fighter_1_record_agains_each_opponent_stance = fighter_1_data['fighter_1_record_agains_each_opponent_stance']
fighter_2_record_agains_each_opponent_stance = fighter_2_data['fighter_1_record_agains_each_opponent_stance']
fighter_1_record_agains_fighter_2_srtle = fighter_1_record_agains_each_opponent_style[fighter_2_style] if fighter_2_style in fighter_1_record_agains_each_opponent_style else []
fighter_1_record_agains_fighter_2_stance = fighter_1_record_agains_each_opponent_stance[fighter_2_stance] if fighter_2_stance in fighter_1_record_agains_each_opponent_stance else []
fighter_2_record_agains_fighter_1_srtle = fighter_2_record_agains_each_opponent_style[fighter_1_style] if fighter_1_style in fighter_2_record_agains_each_opponent_style else []
fighter_2_record_agains_fighter_1_stance = fighter_2_record_agains_each_opponent_stance[fighter_1_stance] if fighter_1_stance in fighter_2_record_agains_each_opponent_stance else []
have_fighters_fought_before: bool = fighter_1.lower() in all_fighter_2_opponents or fighter_2.lower() in all_fighter_1_opponents
fighter_with_more_wins_over_other = None
fighter_with_better_record_against_opponent_style = None
fighter_1_record_against_fighter_2 = None
fighter_2_record_against_fighter_1 = None
amount_of_fighter_1_wins_agains_fighter_2 = None
amount_of_fighter_1_loss_agains_fighter_2 = None
amount_of_fighter_1_draw_agains_fighter_2 = None
amount_of_fighter_1_nc_agains_fighter_2 = None
amount_of_fighter_2_wins_agains_fighter_1 = None
amount_of_fighter_2_loss_agains_fighter_1 = None
amount_of_fighter_2_draw_agains_fighter_1 = None
amount_of_fighter_2_nc_agains_fighter_1 = None
if have_fighters_fought_before == True:
fighter_1_result_previous_results_against_fighter_2 = all_fighter_1_results_against_all_opponents[fighter_2.lower()]
fighter_2_result_previous_results_against_fighter_1 = all_fighter_2_results_against_all_opponents[fighter_1.lower()]
amount_of_fighter_1_wins_agains_fighter_2 = fighter_1_result_previous_results_against_fighter_2.count('win')
amount_of_fighter_1_loss_agains_fighter_2 = fighter_1_result_previous_results_against_fighter_2.count('loss')
amount_of_fighter_1_draw_agains_fighter_2 = fighter_1_result_previous_results_against_fighter_2.count('draw')
amount_of_fighter_1_nc_agains_fighter_2 = fighter_1_result_previous_results_against_fighter_2.count('nc')
fighter_1_record_against_fighter_2 = f"{amount_of_fighter_1_wins_agains_fighter_2}-{amount_of_fighter_1_loss_agains_fighter_2}-{amount_of_fighter_1_draw_agains_fighter_2} -{amount_of_fighter_1_nc_agains_fighter_2}nc"
amount_of_fighter_2_wins_agains_fighter_1 = fighter_2_result_previous_results_against_fighter_1.count('win')
amount_of_fighter_2_loss_agains_fighter_1 = fighter_2_result_previous_results_against_fighter_1.count('loss')
amount_of_fighter_2_draw_agains_fighter_1 = fighter_2_result_previous_results_against_fighter_1.count('draw')
amount_of_fighter_2_nc_agains_fighter_1 = fighter_2_result_previous_results_against_fighter_1.count('nc')
fighter_2_record_against_fighter_1 = f"{amount_of_fighter_2_wins_agains_fighter_1}-{amount_of_fighter_2_loss_agains_fighter_1}-{amount_of_fighter_2_draw_agains_fighter_1} -{amount_of_fighter_2_nc_agains_fighter_1}nc"
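# The head-to-head record strings follow a "wins-losses-draws -<n>nc" layout,
# e.g. "2-1-0 -0nc" reads as two wins, one loss, no draws and no no-contests.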
if amount_of_fighter_1_wins_agains_fighter_2 > amount_of_fighter_2_wins_agains_fighter_1:
fighter_with_more_wins_over_other = fighter_1
elif amount_of_fighter_2_wins_agains_fighter_1 > amount_of_fighter_1_wins_agains_fighter_2:
fighter_with_more_wins_over_other = fighter_2
# Do something with return value if have fought before and equal
else:
fighter_1_result_previous_results_against_fighter_2 = None
amount_of_times_fighter_1_has_fought_fighter_2_style = len(fighter_1_record_agains_fighter_2_srtle) if fighter_1_record_agains_fighter_2_srtle != None else None
amount_of_times_fighter_1_has_fought_fighter_2_stance = len(fighter_1_record_agains_fighter_2_stance) if fighter_1_record_agains_fighter_2_stance != None else None
amount_of_wins_fighter_1_has_against_fighter_2_style = fighter_1_record_agains_fighter_2_srtle.count('win') if isinstance(fighter_1_record_agains_fighter_2_srtle, list) else None
amount_of_wins_fighter_1_has_against_fighter_2_stance = fighter_1_record_agains_fighter_2_stance.count('win') if isinstance(fighter_1_record_agains_fighter_2_stance, list) else None
if amount_of_wins_fighter_1_has_against_fighter_2_style == 0:
percentage_of_wins_fighter_1_has_against_fighter_2_style = 0
else:
percentage_of_wins_fighter_1_has_against_fighter_2_style = round(amount_of_wins_fighter_1_has_against_fighter_2_style / amount_of_times_fighter_1_has_fought_fighter_2_style, 2) if amount_of_wins_fighter_1_has_against_fighter_2_style != None else None
if amount_of_wins_fighter_1_has_against_fighter_2_stance == 0:
percentage_of_wins_fighter_1_has_against_fighter_2_stance = 0
else:
percentage_of_wins_fighter_1_has_against_fighter_2_stance = round(amount_of_wins_fighter_1_has_against_fighter_2_stance / amount_of_times_fighter_1_has_fought_fighter_2_stance, 2) if amount_of_wins_fighter_1_has_against_fighter_2_stance != None else None
amount_of_times_fighter_2_has_fought_fighter_1_style = len(fighter_2_record_agains_fighter_1_srtle) if fighter_2_record_agains_fighter_1_srtle != None else None
amount_of_times_fighter_2_has_fought_fighter_1_stance = len(fighter_2_record_agains_fighter_1_stance) if fighter_2_record_agains_fighter_1_stance != None else None
amount_of_wins_fighter_2_has_against_fighter_1_style = fighter_2_record_agains_fighter_1_srtle.count('win') if isinstance(fighter_2_record_agains_fighter_1_srtle, list) else None
amount_of_wins_fighter_2_has_against_fighter_1_stance = fighter_2_record_agains_fighter_1_stance.count('win') if isinstance(fighter_2_record_agains_fighter_1_stance, list) else None
if amount_of_wins_fighter_2_has_against_fighter_1_style == 0:
percentage_of_wins_fighter_2_has_against_fighter_1_style = 0
else:
percentage_of_wins_fighter_2_has_against_fighter_1_style = round(amount_of_wins_fighter_2_has_against_fighter_1_style / amount_of_times_fighter_2_has_fought_fighter_1_style, 2) if amount_of_wins_fighter_2_has_against_fighter_1_style != None else None
if amount_of_wins_fighter_2_has_against_fighter_1_stance == 0:
percentage_of_wins_fighter_2_has_against_fighter_1_stance = 0
else:
percentage_of_wins_fighter_2_has_against_fighter_1_stance = round(amount_of_wins_fighter_2_has_against_fighter_1_stance / amount_of_times_fighter_2_has_fought_fighter_1_stance, 2) if amount_of_wins_fighter_2_has_against_fighter_1_stance != None else None
fighter_with_better_record_against_opponent_style = find_max((fighter_1, percentage_of_wins_fighter_1_has_against_fighter_2_style), (fighter_2, percentage_of_wins_fighter_2_has_against_fighter_1_style)) if percentage_of_wins_fighter_1_has_against_fighter_2_style != None else None
fighter_with_better_record_against_opponent_stance = find_max((fighter_1, percentage_of_wins_fighter_1_has_against_fighter_2_stance), (fighter_2, percentage_of_wins_fighter_2_has_against_fighter_1_stance)) if percentage_of_wins_fighter_1_has_against_fighter_2_stance != None else None
res = {'fighter_with_better_record_against_opponent_style': fighter_with_better_record_against_opponent_style, 'fighter_with_better_record_against_opponent_stance': fighter_with_better_record_against_opponent_stance, 'fighter_with_more_wins_over_other': fighter_with_more_wins_over_other, 'percentage_of_wins_fighter_1_has_against_fighter_2_style': percentage_of_wins_fighter_1_has_against_fighter_2_style, 'percentage_of_wins_fighter_2_has_against_fighter_1_style': percentage_of_wins_fighter_2_has_against_fighter_1_style, 'fighter_1_result_previous_results_against_fighter_2': fighter_1_result_previous_results_against_fighter_2, 'fighter_1_record_against_fighter_2': fighter_1_record_against_fighter_2, 'fighter_2_record_against_fighter_1': fighter_2_record_against_fighter_1, 'fighter_1_stance': fighter_1_stance, 'fighter_2_stance': fighter_2_stance, 'amount_of_times_fighter_2_has_fought_fighter_1_stance': amount_of_times_fighter_2_has_fought_fighter_1_stance, 'amount_of_wins_fighter_2_has_against_fighter_1_stance': amount_of_wins_fighter_2_has_against_fighter_1_stance,
'amount_of_times_fighter_1_has_fought_fighter_2_stance': amount_of_times_fighter_1_has_fought_fighter_2_stance, 'amount_of_wins_fighter_1_has_against_fighter_2_stance': amount_of_wins_fighter_1_has_against_fighter_2_stance,
'percentage_of_wins_fighter_1_has_against_fighter_2_stance': percentage_of_wins_fighter_1_has_against_fighter_2_stance,
'percentage_of_wins_fighter_2_has_against_fighter_1_stance': percentage_of_wins_fighter_2_has_against_fighter_1_stance,
'amount_of_times_fighter_1_has_fought_fighter_2_style': amount_of_times_fighter_1_has_fought_fighter_2_style,
'amount_of_times_fighter_2_has_fought_fighter_1_style': amount_of_times_fighter_2_has_fought_fighter_1_style,
'amount_of_wins_fighter_1_has_against_fighter_2_style': amount_of_wins_fighter_1_has_against_fighter_2_style,
'amount_of_wins_fighter_2_has_against_fighter_1_style': amount_of_wins_fighter_2_has_against_fighter_1_style,
}
return return_response(res, 'Success', status.HTTP_200_OK)
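# get_next_event_poster (below) scrapes ufc.com/events for the hero image of the next
# card and returns the first URL in the <source> element's srcset; for a srcset like
# "https://.../poster.jpg 1x, https://.../poster@2x.jpg 2x" (illustrative values)
# that is the 1x URL.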
@api_view(['GET'])
def get_next_event_poster(request):
ufc_event_url = 'https://www.ufc.com/events'
response = requests.get(ufc_event_url)
response.raise_for_status()
soup = BeautifulSoup(response.content, 'html.parser')
poster_element = soup.find('div', class_='c-hero__image')
if poster_element:
source_element = poster_element.find('source')
if source_element:
srcset = source_element['srcset']
urls = srcset.split(',')
first_url = urls[0].split(' ')[0]
return return_response(first_url, 'Success', status.HTTP_200_OK)
else:
return return_response({}, 'Error', status.HTTP_200_OK)
return return_response({}, 'Error', status.HTTP_200_OK)
| [] |
2024-01-10 | evanmeeks/tree-of-thoughts | example.py | import os
from tree_of_thoughts.openaiModels import OpenAILanguageModel
from tree_of_thoughts.treeofthoughts import TreeofThoughts
#
api_model= "gpt-3.5-turbo"
model = OpenAILanguageModel(api_key='', api_model=api_model)
#choose search algorithm('BFS' or 'DFS')
search_algorithm = "BFS"
# value or vote
evaluation_strategy = "value"
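# In this repo's OpenAILanguageModel, "value" scores each candidate state
# independently as a float between 0 and 1, while "vote" asks the model to pick
# the single best state across all candidates.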
tree_of_thoughts= TreeofThoughts(model, search_algorithm)
input_problem = "use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation"
num_thoughts = 2
max_steps= 3
max_states = 5
value_threshold= 0.5
#call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(input_problem,
num_thoughts=num_thoughts,
    max_steps=max_steps,
max_states=5,
value_threshold=value_threshold,
)
#use the solution in your production environment
print(f"solution: {solution}")
| [] |
2024-01-10 | evanmeeks/tree-of-thoughts | experiements~latest.py |
import concurrent.futures
from abc import ABC, abstractmethod
import openai
import os
import re
import guidance
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
import heapq
import json
DATA_PATH = './data'
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class HuggingLanguageModel(AbstractLanguageModel):
def __init__(self, model_name, model_tokenizer=None, verbose=False):
self.model = AutoModelForCausalLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_tokenizer or model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve {state_text}"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, inital_prompt, max_length=10):
state_values = {}
for state in states:
state_text = '\n'.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve {inital_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, num_return_sequences=1, max_length=max_length)
value_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
value = float(value_text)
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
class HFPipelineModel(AbstractLanguageModel):
def __init__(self, model_name, verbose=False):
self.pipeline = pipeline("text-generation", model=model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(input_ids=inputs["input_ids"], max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: \n\n\n'{state_text}'\n\n\n, pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
generated_outputs = self.pipeline(prompt, max_length=max_length, num_return_sequences=1)
value_text = generated_outputs[0]["generated_text"]
value = float(value_text)
print(f'value {value}')
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
@staticmethod
def load(model_name, verbose=False):
return HFPipelineModel(model_name, verbose)
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=True):
if api_key == "" or api_key == None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base == None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model == None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'."
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
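# Any api_model whose name contains "gpt" is routed through the chat completions
# endpoint; everything else (e.g. text-davinci-003) uses the legacy completions
# endpoint inside openai_api_call_handler below.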
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=400,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duratoin)
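# On a RateLimitError the request is retried indefinitely, sleeping for
# OPENAI_RATE_TIMEOUT seconds (default 30) between attempts, e.g.:
#   export OPENAI_RATE_TIMEOUT=60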
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
with open("openai.logs", 'a') as log_file:
log_file.write("Response : "+ text+"\n\n\n")
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
return thoughts
def generate_thoughts(self, state, k):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f"Given the current state of reasoning: \n\n\n'{state_text}'\n\n\nGenerate the next best coherent thought to achieve the reasoning process and get the solution: "
prompt += self.ReAct_prompt
print(prompt)
thoughts = self.generate_text(prompt, k)
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
prompt = f"Given the following reasoning: \n\n\n'{state_text}'\n Give me the best solution you can think of the task : {initial_prompt}"
answer = self.generate_text(prompt, 1)
# print(thoughts)
print(f"General solution : {answer}")
return answer
def evaluate_states(self, states, inital_prompt):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {inital_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
response = self.openai_api_call_handler(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
print(f'state: {value_text}')
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\n{states_text}\n\nVote, on the probability of this state of reasoning achieveing {inital_prompt} and become very pessimistic very NOTHING ELSE"
response = self.openai_api_call_handler(prompt, 50, 1)
print(f'state response: {response}')
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
print(f'best_state: {best_state}')
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
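# Under the "value" strategy each state receives an independent float score parsed
# from the model's reply (falling back to 0 when parsing fails); under "vote" the
# chosen state gets 1 and every other state gets 0.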
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
super().__init__(api_key, strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k), states))
print(f"Parallel generated thoughts: {thoughts}")
return thoughts
def parallel_evaluate_states(self, states, inital_prompt):
with concurrent.futures.ThreadPoolExecutor() as executor:
state_values = list(executor.map(lambda state: self.evaluate_states(state, inital_prompt), states))
print(f"Parallel evaluated state values: {state_values}")
return state_values
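# Both parallel_* helpers fan the per-state work out over a ThreadPoolExecutor.
# A rough usage sketch, assuming OPENAI_API_KEY is set in the environment
# (the states and k below are illustrative values, not taken from this file):
#   model = OptimizedOpenAILanguageModel("", api_model="gpt-3.5-turbo")
#   thoughts = model.parallel_generate_thoughts(["state a", "state b"], 2)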
class GuidanceLanguageModel(AbstractLanguageModel):
def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
# gpt4 = guidance.llms.OpenAI("gpt-4")
# vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
self.model = model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = '''{{#assistant~}}
{{gen 'Observation' temperature=0.5 max_tokens=50}}
{{~/assistant}}'''
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.thoughts_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
Don't answer the question yet.
{{~/user}}
%s
{{#assistant~}}
{{gen 'Thoughts' temperature=0.5 max_tokens=50}}
{{~/assistant}}
''' % self.ReAct_prompt, llm=self.model)
self.value_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Evaluate its value as a float between 0 and 1, and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Value' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
self.vote_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the following states of reasoning, vote for the best state:
{{states_text}}
Give the index of your voted best state(the 1st state has index 0), and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Vote' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
def model_response_handler(self, program, **kargs):
print("Calling guidance model(Modify Me to handle specific LLM response excpetions!)")
reponse = program(**kargs)
return reponse
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
state_text = ' '.join(state)
thoughts = []
for _ in range(k):
response = self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
text = response['Thoughts']
thoughts += [text]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
#implement state evaluation logic using self.model
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
response = self.model_response_handler(self.value_program, state_text=state_text)
try:
value_text = response['Value']
print(f"Value text {value_text}")
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
response = self.model_response_handler(self.vote_program, states_text=states_text)
best_state_text = response['Vote']
print(f"Best state text: {best_state_text}")
best_state = int(best_state_text)
return {state: 1 if i == best_state else 0 for i in range(len(states))}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key == None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base == None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model == None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
def model_response_handler(self, program, **kargs):
error_msg = ''
while True:
try:
program.llm.max_retries = 60
guidance.llms.OpenAI.cache.clear()
response = program(**kargs)
return response
except openai.error.RateLimitError as e:
sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duratoin)
except Exception as e:
if str(e) == f'''Too many (more than {guidance.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duratoin)
else:
error_msg = str(e)
break
raise Exception(error_msg)
# #------------------------------------> v0 end
# class TreeofThoughts:
# """
# 1. Thought Decomposition --> based on problem properties
# 2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies a sample iid thoughts from a cot prompt b. propose thoughts
# sequentially using a propose prompt
# 3. create a state evaluator function V(p0, S) with 2 strategies a value each state independently b. vote across states
# 4. Choose a search algo based on tree structure [BFS or DFS]
# Implement chosen search algorithm for bfs (algo1):
# init S0 with the input x
# for t = 1 to T (step limit):
# generate candidate thoughts for each state in St-1
# eveluate the candiate states using the state evaluator V
# select the b most promising states for St
# return the final output by genertaing the thought for the best state in St for DFS(algo2)
# defien a recurseive DFS function with the current state s, step t, and other required params
# if t > T record the output by generating the thought for current state S
# for each candidate state s in the sorted list of generated thoughts for s:
# if the evaluated value of s is greater the the threshold of vth call the dfs function recursively
# with s and t + 1
# execute the chosen search algo with the input problem, thought generator, and state evaluator, and other required params
# """
# def __init__(self, model, search_algorithm):
# self.model = model
# self.search_algorithm = search_algorithm
# def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# start_time = time.time()
# if self.search_algorithm == 'BFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_bfs(x, k, T, b)
# if result:
# return result
# elif self.search_algorithm == 'DFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_dfs(x, k, T, vth)
# if result:
# return result
# else:
# raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# def tot_bfs(self, x, k, T, b):
# S0 = {x}
# for t in range(1, T + 1):
# S0_t = {(*s, z) for s in S0 for z in self.model.generate_thoughts(s, k)}
# Vt = self.model.evaluate_states(S0_t, x)
# St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# S0 = set(St)
# print(f'S0L {S0}')
# return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# def tot_dfs(self, x, k, T, vth, pruning_threshold=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# output = []
# iteration_count = 0
# consecutive_convergence_count = 0
# prev_best_value = None
# def dfs(s, t):
# nonlocal consecutive_convergence_count, prev_best_value, iteration_count
# if t > T:
# thought = self.model.generate_thouhts(s, 1)
# value = self.model.evaluate_states({s}, x)[s]
# print(f'thought {thought} and value: {value}')
# output.append((thought, value))
# if confidence_threshold is not None and value >= confidence_threshold:
# return True
# if prev_best_value is not None and convergence_threshold is not None:
# if abs(value - prev_best_value) < convergence_threshold:
# consecutive_convergence_count += 1
# else:
# consecutive_convergence_count = 0
# prev_best_value = value
# iteration_count += 1
# if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
# return True
# return False
# for s_prime in sorted(self.model.generate_thoughts(s, k)):
# state_value = self.model.evaluate_states({s_prime}, x)[s_prime]
# if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
# if dfs((*s, s_prime), t + 1):
# return True
# return False
# dfs(x, 1)
# return max(output, key=lambda x: x[1]) if output else None
# #------------------------------------> v0 end
# #v1
# #------------------------------------> v1 start
# class OptimizedTreeofThoughts(TreeofThoughts):
# # def tot_bfs(self, x, k, T, b):
# # S0 = {x}
# # for t in range(1, T + 1):
# # S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
# # Vt = self.model.parallel_evaluate_states(S0_t)
# # St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# # S0 = set(St)
# # return self.model.parallel_generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# # def tot_dfs(self, x, k, T, vth):
# # output = []
# # def dfs(s, t):
# # if t > T:
# # output.append(self.model.parallel_generate_thoughts(s, 1))
# # return
# # for s_prime in sorted(self.model.parallel_generate_thoughts(s, k)):
# # if self.model.parallel_evaluate_states({s_prime})[s_prime] > vth:
# # dfs((*s, s_prime), t + 1)
# # dfs(x, 1)
# # return output
# def tot_bfs(self, x, k, T, b):
# S0 = {x}
# for t in range(1, T + 1):
# S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
# Vt = self.model.parallel_evaluate_states(S0_t)
# St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# S0 = set(St)
# return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# def tot_dfs(self, x, k, T, vth):
# output = []
# def dfs(s, t):
# if t > T:
# output.append(self.model.generate_thoughts(s, 1))
# return
# for s_prime in sorted(self.model.generate_thoughts(s, k)):
# if self.model.evaluate_states({s_prime})[s_prime] > vth:
# dfs((*s, s_prime), t + 1)
# dfs(x, 1)
# return output
# def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# start_time = time.time()
# if self.search_algorithm == 'BFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_bfs(x, k, T, b)
# print(f'result: {result}')
# if result:
# return result
# elif self.search_algorithm == 'DFS':
# while timeout is None or time.time() - start_time < timeout:
# result = self.tot_dfs(x, k, T, vth,
# confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
# print(f'result: {result}')
# if result:
# return result
# else:
# raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# #------------------------------------> v1 end
# #v2
# # class OptimizedTreeofThoughts(TreeofThoughts):
# # def __init__(self, model, search_algorithm):
# # super().__init__(model, search_algorithm)
# # self.model = model
# # self.tree = {
# # "thoughts": [],
# # "states": []
# # }
# # self.metrics = {
# # "x": [],
# # "vth": [],
# # # "evaluations "
# # "b": [],
# # "k": [],
# # "new_best_value": [],
# # "result": [],
# # "search_algorithm": search_algorithm
# # }
# # def parallel_generate_thoughts(self, states, k):
# # with concurrent.futures.ThreadPoolExecutor() as executor:
# # thoughts = list(executor.map(lambda state: self.model.generate_thoughts(state, k), states))
# # print(f"Parallel generated thoughts: {thoughts}")
# # return thoughts
# # def parallel_evaluate_states(self, states, initial_prompt):
# # with concurrent.futures.ThreadPoolExecutor() as executor:
# # state_values = list(executor.map(lambda state: self.model.evaluate_states(state, initial_prompt), states))
# # print(f"Parallel evaluated state values: {state_values}")
# # return state_values
# # def tot_bfs(self, x, k, T, b):
# # S0 = {x}
# # for t in range(1, T + 1):
# # S0_t = {(*s, z) for s in S0 for z in self.model.parallel_generate_thoughts(s, k)}
# # Vt = self.model.parallel_evaluate_states(S0_t)
# # St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
# # S0 = set(St)
# # return self.model.generate_thoughts(max(St, key=lambda s: Vt[s]), 1)
# # def tot_dfs(self, x, k, T, vth):
# # output = []
# # def dfs(s, t):
# # if t > T:
# # output.append(self.model.generate_thoughts(s, 1))
# # return
# # for s_prime in sorted(self.model.generate_thoughts(s, k)):
# # if self.model.evaluate_states({s_prime})[s_prime] > vth:
# # dfs((*s, s_prime), t + 1)
# # dfs(x, 1)
# # return output
# # def save_tree_to_json(self, file_name):
# # output_data = {
# # "tree": self.tree,
# # "metrics": self.metrics
# # }
# # os.makedirs(os.path.dirname(file_name), exist_ok=True)
# # with open(file_name, 'w') as json_file:
# # json.dump(output_data, json_file, indent=4)
# # def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
# # start_time = time.time()
# # file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
# # try:
# # if self.search_algorithm == 'BFS':
# # while timeout is None or time.time() - start_time < timeout:
# # result = self.tot_bfs(x, k, T, b)
# # print(f'result: {result}')
# # if result:
# # self.metrics["result"].append(result)
# # self.metrics["x"].append(x)
# # self.metrics["k"].append(k)
# # self.metrics["T"].append(T)
# # self.metrics["b"].append(b)
# # self.metrics["vth"].append(vth)
# # self.save_tree_to_json(file_name)
# # return result
# # elif self.search_algorithm == 'DFS':
# # while timeout is None or time.time() - start_time < timeout:
# # result = self.tot_dfs(x, k, T, vth, dynamic_pruning=True, early_stopping=True, early_stopping_threshold=0.001)
# # print(f'result: {result}')
# # if result:
# # self.metrics["result"].append(result)
# # self.metrics["x"].append(x)
# # self.metrics["k"].append(k)
# # self.metrics["T"].append(T)
# # self.metrics["b"].append(b)
# # self.metrics["vth"].append(vth)
# # self.save_tree_to_json(file_name)
# # return result
# # else:
# # raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
# # except KeyboardInterrupt:
# # print("Keyboard interrupt detected.")
# # finally:
# # print("Saving the current tree and metrics.")
# # self.save_tree_to_json(file_name)
class TreeofThoughts:
"""
1. Thought Decomposition --> based on problem properties
2. Thought Generator -> create a thought generator function G(p0, s, k) with 2 strategies: a. sample i.i.d. thoughts from a CoT prompt, b. propose thoughts
sequentially using a propose prompt
3. Create a state evaluator function V(p0, S) with 2 strategies: a. value each state independently, b. vote across states
4. Choose a search algorithm based on the tree structure [BFS or DFS]
Implement the chosen search algorithm for BFS (algo1):
init S0 with the input x
for t = 1 to T (step limit):
generate candidate thoughts for each state in St-1
evaluate the candidate states using the state evaluator V
select the b most promising states for St
return the final output by generating the thought for the best state in St
For DFS (algo2):
define a recursive DFS function with the current state s, step t, and other required params
if t > T, record the output by generating the thought for the current state s
for each candidate state s' in the sorted list of generated thoughts for s:
if the evaluated value of s' is greater than the threshold vth, call the dfs function recursively
with s' and t + 1
execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
self.tree = {
"nodes": [],
"metrics": {
"thoughts": [],
"evaluations": []
}
}
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
try:
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
if result:
self.save_tree_to_json(file_name)
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth)
if result:
self.save_tree_to_json(file_name)
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
except KeyboardInterrupt:
print("Keyboard interrupt detected.")
finally:
print("Saving the current tree and metrics.")
self.save_tree_to_json(file_name)
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = set()
for s in S0:
for z in self.model.generate_thoughts(s, k):
if (type(s) == str):
S0_t.add((s, z))
else:
S0_t.add((*s, z))
Vt = self.model.evaluate_states(S0_t, x)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
# print(f'S0_t: {S0_t} Vt: {Vt} St: {St} S0: {S0}')
#store thoughts evaluations and parent nodes in a json file
for s in S0_t:
self.tree['nodes'].append(s)
self.tree["metrics"]["thoughts"].append(s[-1])
self.tree["metrics"]["evaluations"].append(Vt[s])
best_state = max(St, key=lambda s: Vt[s])
solution = self.model.generate_solution(x, best_state)
return solution
def tot_dfs(self, x, k, T, vth, pruning_threshold=0.5, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
output = []
iteration_count = 0
consecutive_convergence_count = 0
prev_best_value = None
def dfs(s, t):
nonlocal consecutive_convergence_count, prev_best_value, iteration_count
if t > T:
thought = self.model.generate_thoughts(s, 1)
value = self.model.evaluate_states({s}, x)[s]
output.append((thought, value))
if confidence_threshold is not None and value >= confidence_threshold:
return True
if prev_best_value is not None and convergence_threshold is not None:
if abs(value - prev_best_value) < convergence_threshold:
consecutive_convergence_count += 1
else:
consecutive_convergence_count = 0
prev_best_value = value
iteration_count += 1
if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
return True
return False
for s_prime in sorted(self.model.generate_thoughts(s, k)):
state_value = self.model.evaluate_states({s_prime}, x)[s_prime]
if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
if dfs((*s, s_prime), t + 1):
return True
return False
dfs(x, 1)
return max(output, key=lambda x: x[1]) if output else None
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
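# The saved JSON mirrors self.tree, roughly:
#   {"nodes": [...visited candidate states...],
#    "metrics": {"thoughts": [...last thought of each state...], "evaluations": [...their scores...]}}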
#does not output state after each thought --- idk why -- needs work
class OptimizedTreeofThoughts(TreeofThoughts):
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
print(f'Start time {start_time}')
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
print(f'result in optimized tree of thoughts: {result}')
if result:
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
if result:
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
if __name__ == '__main__':
search_algorithm = "DFS"
strategy = "cot"
evaluation_strategy="vote"
#create instance
model = OptimizedOpenAILanguageModel('', api_model="gpt-3.5-turbo")
tree_of_thoughts = TreeofThoughts(model, search_algorithm)
input_problem = "use 4 numbers and basic arithmetic operations (+-*/) to obtain 24"
k = 1#number of thoughts to input
T = 5 # maximum depth of the search tree
b = 14 # branching factor -< number of child nodes for each branch
vth = 0.8 # pruning threshold -> any evaluated thought below this is eliminated
timeout = 10 #10 seconds timeout before stop
confidence = 0.8 #model confidence threshold on performance
max_iterations = 40 #tree branch nodes
convergence_threshold = 0.01 #determines when the search process has converged
convergence_count = 5 # number of consecutive converged evaluations required
#read documentation for more
#call the solve method with the input problem and other params
solution = tree_of_thoughts.solve(input_problem, k, T, b, vth, timeout, confidence, max_iterations, convergence_threshold, convergence_count)
#use the solution
print(f"solution: {solution}")
# # Save the tree and metrics to a JSON file
# file_name = "logs/tree_of_thoughts_output.json"
# tree_of_thoughts.save_tree_to_json(file_name) | [
"Given the current state of reasoning: \n\n\n'PLACEHOLDER'\n\n\n, pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve PLACEHOLDER",
"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: 'PLACEHOLDER', generate PLACEHOLDER coherent solutions to achieve",
"Given the current state of reasoning: \n\n\n'PLACEHOLDER'\n\n\nGenerate the next best coherent thought to achieve the reasoning process and get the solution: ",
"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: 'PLACEHOLDER', generate PLACEHOLDER coherent solutions to achieve PLACEHOLDER",
"Given the current state of reasoning: 'PLACEHOLDER', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing PLACEHOLDER and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT",
"Given the current state of reasoning: 'PLACEHOLDER', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve PLACEHOLDER",
"Given the following reasoning: \n\n\n'PLACEHOLDER'\n Give me the best solution you can think of the task : PLACEHOLDER",
"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\nPLACEHOLDER\n\nVote, on the probability of this state of reasoning achieveing PLACEHOLDER and become very pessimistic very NOTHING ELSE"
] |
2024-01-10 | evanmeeks/tree-of-thoughts | experiements~main.py |
import concurrent.futures
from abc import ABC, abstractmethod
import openai
import os
import re
import guidance
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import pipeline
import heapq
import json
DATA_PATH = './data'
import logging
import argparse
from dotenv import load_dotenv
load_dotenv()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
class CustomLanguageModel(AbstractLanguageModel):
def __init__(self, model):
self.model = model
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
pass
def evaluate_states(self, states):
#implement state evaluation logic using self.model
pass
class HuggingLanguageModel(AbstractLanguageModel):
def __init__(self, model_name, model_tokenizer=None, verbose=False):
self.model = AutoModelForCausalLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_tokenizer or model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve {state_text}"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, inital_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve {inital_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, num_return_sequences=1, max_length=max_length)
value_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
value = float(value_text)
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
class HFPipelineModel(AbstractLanguageModel):
def __init__(self, model_name, verbose=False):
self.model_name = model_name
self.pipeline = pipeline("text-generation", model=model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(input_ids=inputs["input_ids"], max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
generated_outputs = self.pipeline(prompt, max_length=max_length, num_return_sequences=1)
value_text = generated_outputs[0]["generated_text"]
value = float(value_text)
print(f'value {value}')
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values
@staticmethod
def load(model_name, verbose=False):
return HFPipelineModel(model_name, verbose)
class OpenAILanguageModel(AbstractLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=True):
env_tree = os.getenv("OPENAI_API_KEY")
if api_key == "" or api_key == None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base == None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model == None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'."
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
print(f'{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duratoin)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.openai_api_call_handler(prompt, 50, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
return thoughts
def generate_thoughts(self, state, k, inital_prompt):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
# prompt = f"Given the current state of reasoning: \n\n\n'{state_text}'\n\n\nGenerate the next best coherent thought to achieve the reasoning process and get the solution: "
# prompt = f"Based on the current state of reasoning: \n\n\n'{state_text} Provide the next coherent thought that will help progress the reasoning process and reach an soluton "
# prompt = f"These are the thoughts you've had: \n\n\n{state_text}, provide the next coherent thought that will help advance the reasoning process and reach an solution for this problem {inital_prompt}. Think sharply, think out of the box, predict failure. Do not leave any open questions. Unleash your mind."
prompt = f"Considering the thoughts you've had until now:\n\n{state_text}\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to {inital_prompt}. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain."
prompt += self.ReAct_prompt
print(prompt)
thoughts = self.generate_text(prompt, k)
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self, initial_prompt, state):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
prompt = f"Considering the reasoning provided:\n\n'{state_text}'\n\nDevise the best possible solution for the task: {initial_prompt}"
answer = self.generate_text(prompt, 1)
# print(thoughts)
print(f"General solution : {answer}")
return answer
def evaluate_states(self, states, inital_prompt):
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f"Given the current state of reasoning: '{state_text}', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing {inital_prompt} and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT"
response = self.openai_api_call_handler(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
print(f'state: {value_text}')
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
prompt = f"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\n{states_text}\n\nVote, on the probability of this state of reasoning achieveing {inital_prompt} and become very pessimistic very NOTHING ELSE"
response = self.openai_api_call_handler(prompt, 50, 1)
print(f'state response: {response}')
best_state_text = self.openai_choice2text_handler(response.choices[0])
print(f"Best state text: {best_state_text}")
best_state = tuple(best_state_text.split())
print(f'best_state: {best_state}')
return {state: 1 if state == best_state else 0 for state in states}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
# def solution(self, states, initial_prompt):
class OptimizedOpenAILanguageModel(OpenAILanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", cache_enabled=True, api_base="", api_model="", enable_ReAct_prompting=False):
super().__init__(api_key, strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting)
self.cache_enabled = cache_enabled
self.thought_cache = {}
self.state_evaluation_cache = {}
def parallel_generate_thoughts(self, states, k, initial_prompt=None):
with concurrent.futures.ThreadPoolExecutor() as executor:
thoughts = list(executor.map(lambda state: self.generate_thoughts(state, k, initial_prompt), states))
print(f"Parallel generated thoughts: {thoughts}")
return thoughts
def parallel_evaluate_states(self, states, inital_prompt):
with concurrent.futures.ThreadPoolExecutor() as executor:
results = executor.map(lambda state: self.evaluate_states({state}, inital_prompt), states)
state_values = {s: v for partial in results for s, v in partial.items()}
print(f"Parallel evaluated state values: {state_values}")
return state_values
class GuidanceLanguageModel(AbstractLanguageModel):
def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
# gpt4 = guidance.llms.OpenAI("gpt-4")
# vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
self.model = model
# reference : https://www.promptingguide.ai/techniques/react
self.ReAct_prompt = ''
if enable_ReAct_prompting:
self.ReAct_prompt = '''{{#assistant~}}
{{gen 'Observation' temperature=0.5 max_tokens=50}}
{{~/assistant}}'''
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
self.thoughts_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
Don't answer the question yet.
{{~/user}}
%s
{{#assistant~}}
{{gen 'Thoughts' temperature=0.5 max_tokens=50}}
{{~/assistant}}
''' % self.ReAct_prompt, llm=self.model)
self.value_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the current state of reasoning:
{{state_text}}
Evaluate its value as a float between 0 and 1, and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Value' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
self.vote_program = guidance('''
{{#system~}}
You are a logical and rational assistant.
{{~/system}}
{{#user~}}
Given the following states of reasoning, vote for the best state:
{{states_text}}
Give the index of your voted best state(the 1st state has index 0), and NOTHING ELSE
Don't answer the question yet.
{{~/user}}
{{#assistant~}}
{{gen 'Vote' temperature=1 max_tokens=10}}
{{~/assistant}}
''', llm=self.model)
def model_response_handler(self, program, **kargs):
print("Calling guidance model(Modify Me to handle specific LLM response excpetions!)")
reponse = program(**kargs)
return reponse
def generate_thoughts(self, state, k):
#implement the thought generation logic using self.model
state_text = ' '.join(state)
thoughts = []
for _ in range(k):
response = self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
text = response['Thoughts']
thoughts += [text]
# print(thoughts)
print(f"Generated thoughts: {thoughts}")
return thoughts
def evaluate_states(self, states):
#implement state evaluation logic using self.model
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
state_text = ' '.join(state)
response = self.model_response_handler(self.value_program, state_text=state_text)
try:
value_text = response['Value']
print(f"Value text {value_text}")
value = float(value_text)
print(f"value: {value}")
except ValueError:
value = 0 # Assign a default value if the conversion fails
state_values[state] = value
return state_values
elif self.evaluation_strategy == 'vote':
states_text = '\n'.join([' '.join(state) for state in states])
response = self.model_response_handler(self.vote_program, states_text=states_text)
best_state_text = response['Vote']
print(f"Best state text: {best_state_text}")
best_state = int(best_state_text)
return {state: 1 if i == best_state else 0 for i, state in enumerate(states)}
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False):
if api_key == "" or api_key == None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base == None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model == None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
def model_response_handler(self, program, **kargs):
error_msg = ''
while True:
try:
program.llm.max_retries = 60
guidance.llms.OpenAI.cache.clear()
response = program(**kargs)
return response
except openai.error.RateLimitError as e:
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
except Exception as e:
if str(e) == f'''Too many (more than {program.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
sleep_duration = int(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
time.sleep(sleep_duration)
else:
error_msg = str(e)
break
raise Exception(error_msg)
class TreeofThoughts:
"""
1. Thought Decomposition --> based on problem properties
2. Thought Generator --> create a thought generator function G(p0, s, k) with 2 strategies: a. sample i.i.d. thoughts from a CoT prompt b. propose thoughts
sequentially using a propose prompt
3. Create a state evaluator function V(p0, S) with 2 strategies: a. value each state independently b. vote across states
4. Choose a search algorithm based on the tree structure [BFS or DFS]
Implement the chosen search algorithm for BFS (algo1):
init S0 with the input x
for t = 1 to T (step limit):
generate candidate thoughts for each state in St-1
evaluate the candidate states using the state evaluator V
select the b most promising states for St
return the final output by generating the thought for the best state in St
For DFS (algo2):
define a recursive DFS function with the current state s, step t, and other required params
if t > T, record the output by generating the thought for the current state s
for each candidate state s' in the sorted list of generated thoughts for s:
if the evaluated value of s' is greater than the threshold vth, call the dfs function recursively
with s' and t + 1
execute the chosen search algorithm with the input problem, thought generator, state evaluator, and other required params
"""
def __init__(self, model, search_algorithm):
self.model = model
self.search_algorithm = search_algorithm
self.tree = {
"nodes": [],
"metrics": {
"thoughts": [],
"evaluations": []
}
}
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
try:
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
if result:
self.save_tree_to_json(file_name)
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth)
if result:
self.save_tree_to_json(file_name)
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
except KeyboardInterrupt:
logger.error("Keyboard interrupt detected.")
except ValueError as e:
logger.error(f"Error: {e}")
finally:
logger.info("Saving the current tree and metrics.")
self.save_tree_to_json(file_name)
def tot_bfs(self, x, k, T, b):
S0 = {x}
for t in range(1, T + 1):
S0_t = set()
for s in S0:
for z in self.model.generate_thoughts(s, k, x):
if (type(s) == str):
S0_t.add((s, z))
else:
S0_t.add((*s, z))
Vt = self.model.evaluate_states(S0_t, x)
St = sorted(S0_t, key=lambda s: Vt[s], reverse=True)[:b]
S0 = set(St)
logger.info(f'Step: {t}, S0_t: {S0_t}, Vt: {Vt}, St: {St}, S0: {S0}')
best_state = max(St, key=lambda s: Vt[s])
return best_state
def tot_dfs(self, x, k, T, vth, pruning_threshold=0.5, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
output = []
iteration_count = 0
consecutive_convergence_count = 0
prev_best_value = None
file_name = f"logs/tree_of_thoughts_output_{self.search_algorithm}.json"
def dfs(s, t):
nonlocal consecutive_convergence_count, prev_best_value, iteration_count, output
if t > T:
thought = self.model.generate_thoughts(s, 1, x)
print(f'thoughts inside dfs {thought}')
value = self.model.evaluate_states({s}, x)[s]
print(f'values inside dfs {value}')
output.append((thought, value))
print(f'output {output}')
if confidence_threshold is not None and value >= confidence_threshold:
return True
if prev_best_value is not None and convergence_threshold is not None:
if abs(value - prev_best_value) < convergence_threshold:
consecutive_convergence_count += 1
else:
consecutive_convergence_count = 0
prev_best_value = value
iteration_count += 1
if (max_iterations is not None and iteration_count >= max_iterations) or (convergence_count is not None and consecutive_convergence_count >= convergence_count):
return True
return False
for s_prime in sorted(self.model.generate_thoughts(s, k, x)):
state_value = self.model.evaluate_states({s_prime}, x)[s_prime]
logger.info(f"State: {s_prime}, Value: {state_value}")
if state_value > vth and (pruning_threshold is None or state_value >= pruning_threshold):
if (type(s) == str):
child = (s, s_prime)
else:
child = (*s, s_prime)
# self.tree['nodes'][child] = s
# self.tree["metrics"]["thoughts"][child] = s_prime
# self.tree["metrics"]["evaluations"][child] = state_value
if dfs(child, t + 1):
return True
self.save_tree_to_json(file_name)
return False
dfs(x, 4)
print(f'output {output}')
best_state = max(output, key=lambda x: x[1])
return best_state[0]
def save_tree_to_json(self, file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as json_file:
json.dump(self.tree, json_file, indent=4)
def print_tree(self, x, node=None, depth=0):
if node is None:
node = self.tree["nodes"][x]
thought = self.tree["metrics"]["thoughts"][node]
evaluation = self.tree["metrics"]["evaluations"][node]
tree_info = {
"node": node,
"thought": thought,
"evaluation": evaluation,
"children": []
}
for child, parent in self.tree["nodes"].items():
if parent == node:
child_info = self.print_tree(child, depth + 1)
tree_info["children"].append(child_info)
return tree_info
#does not output state after each thought --- idk why -- needs work
class OptimizedTreeofThoughts(TreeofThoughts):
def solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None):
start_time = time.time()
print(f'Start time {start_time}')
if self.search_algorithm == 'BFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_bfs(x, k, T, b)
print(f'result in optimized tree of thoughts: {result}')
if result:
return result
elif self.search_algorithm == 'DFS':
while timeout is None or time.time() - start_time < timeout:
result = self.tot_dfs(x, k, T, vth, confidence_threshold=confidence_threshold, max_iterations=max_iterations, convergence_threshold=convergence_threshold, convergence_count=convergence_count)
if result:
return result
else:
raise ValueError("Invalid search algorithm. Choose 'BFS' or 'DFS'.")
if __name__ == '__main__':
search_algorithm = "DFS"
strategy = "cot"
evaluation_strategy="vote"
#create instance
model = OptimizedOpenAILanguageModel('', api_model="gpt-3.5-turbo")
tree_of_thoughts = OptimizedTreeofThoughts(model, search_algorithm)
# input_problem = "use 4 numbers and basic arithmetic operations (+-*/) to obtain 24"
parser = argparse.ArgumentParser(description="Tree of Thoughts Solver")
parser.add_argument("--problem", type=str, required=True, help="Initial problem statement")
parser.add_argument("--search_algorithm", type=str, choices=["BFS", "DFS"], default="BFS", help="Search algorithm to use (BFS or DFS)")
parser.add_argument("--k", type=int, default=3, help="Number of thoughts to generate")
parser.add_argument("--T", type=int, default=10, help="Step limit")
parser.add_argument("--b", type=int, default=5, help="Number of most promising states")
parser.add_argument("--vth", type=float, default=0.4, help="Value threshold for DFS")
parser.add_argument("--timeout", type=int, default=10, help="Timeout in seconds before stopping")
parser.add_argument("--confidence", type=float, default=0.8, help="Model confidence threshold")
parser.add_argument("--max_iterations", type=int, default=40, help="Maximum number of tree branch nodes")
parser.add_argument("--convergence_threshold", type=float, default=0.01, help="Convergence threshold for the search process")
parser.add_argument("--convergence_count", type=int, default=5, help="Number of searches to be considered converged")
args = parser.parse_args()
#solve the problem using the tree of thoughts class
optimized_tree_of_thoughts = OptimizedTreeofThoughts(model, search_algorithm=args.search_algorithm)
#solve the problem using the tree of thoughts helper
best_state = optimized_tree_of_thoughts.solve(args.problem, k=args.k, T=args.T, b=args.b, vth=args.vth)
#generate the final solution
final_solution = optimized_tree_of_thoughts.model.generate_solution(args.problem, best_state)
#print the final solution
print(f"Final solution: {final_solution}")
trees = optimized_tree_of_thoughts.print_tree(final_solution)
# tree_of_thoughts.print_tree(final_solution)
#generate solution prompt --> give me an solution is right now -> get_final_answer that takes into account the best state and provides the response
#python tree_of_thoughts.py --problem "Design a new transportation system for a city" --search_algorithm BFS --k 5 --T 3 --b 5 --vth 0.5 --timeout 10 --confidence 0.8 --max_iterations 40 --convergence_threshold 0.01 --convergence_count 5
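# A minimal programmatic sketch of the same flow (commented out; assumes a valid OPENAI_API_KEY
# and uses the classes defined above -- the problem string is purely illustrative):
# model = OptimizedOpenAILanguageModel('', api_model="gpt-3.5-turbo")
# tot = OptimizedTreeofThoughts(model, search_algorithm="BFS")
# best_state = tot.solve("use 4 numbers and basic arithmetic operations (+-*/) to obtain 24", k=3, T=3, b=5, vth=0.4)
# print(model.generate_solution("use 4 numbers and basic arithmetic operations (+-*/) to obtain 24", best_state))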
| [
"Given the current state of reasoning: 'PLACEHOLDER', pessimistically evaluate its value as a float between 0 and 1 based on its potential to achieve PLACEHOLDER",
"Considering the reasoning provided:\n\n'PLACEHOLDER'\n\nDevise the best possible solution for the task: PLACEHOLDER",
"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: 'PLACEHOLDER', generate PLACEHOLDER coherent solutions to achieve",
"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: 'PLACEHOLDER', generate PLACEHOLDER coherent solutions to achieve PLACEHOLDER",
"Given the current state of reasoning: 'PLACEHOLDER', evaluate its value as a float between 0 and 1, become very pessimistic think of potential adverse risks on the probability of this state of reasoning achieveing PLACEHOLDER and DO NOT RESPOND WITH ANYTHING ELSE: OTHER THAN AN FLOAT",
"Given the current state of reasoning: 'PLACEHOLDER', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve PLACEHOLDER",
"Considering the thoughts you've had until now:\n\nPLACEHOLDER\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to PLACEHOLDER. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain.",
"Given the following states of reasoning, vote for the best state utilizing an scalar value 1-10:\nPLACEHOLDER\n\nVote, on the probability of this state of reasoning achieveing PLACEHOLDER and become very pessimistic very NOTHING ELSE"
] |
2024-01-10 | CoslaDigital/ConsulExperimental | public~machine_learning~scripts~moderate.py | #!/usr/bin/python
# moderation logic
# select comment, return id and body text
# moderate body text
# if flagged is true
# update flag_count to send comment for moderation
#
# still to be done: if flagged is true hide comment?
# don't moderate comment which has been previously moderated?
# hide if flagged but flag if score is above threshold?
# auto hide if certain categories have scores
# make the above user configurable
# update notifications for moderator
# send email to user/moderator
#
# move moderation to function
# provide option to use different moderation engines
# log moderation output to log file
# Done
# move openai key to config file
# automate running of script
# allow manual running of script from machine learning scripts folder
import psycopg2
import json
import os
import openai
from configparser import ConfigParser
from datetime import datetime
now = datetime.now()
thresh = 0.15
configfile="moderate.ini"
path_dir = "/home/deploy/consul/current/public/machine_learning/scripts"
path_file = os.sep.join([path_dir, configfile])
print(now,"starting moderation")
print("path is ",path_file)
def config(filename=path_file, section='postgresql'):
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to postgresql
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
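# A sample moderate.ini layout that config() expects (values are illustrative; any keyword
# accepted by psycopg2.connect() can be used as a key in the [postgresql] section):
# [postgresql]
# host=localhost
# dbname=consul
# user=deploy
# password=changeme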
def connect():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
def update_flag(id, flags):
""" update flag count based on the modrated flag """
sql = """ UPDATE comments
SET flags_count = flags_count + %s
WHERE id = %s"""
conn = None
updated_rows = 0
try:
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
# create a new cursor
cur = conn.cursor()
# execute the UPDATE statement
cur.execute(sql, (flags, id))
# get the number of updated rows
updated_rows = cur.rowcount
# Commit the changes to the database
conn.commit()
print(updated_rows)
# Close communication with the PostgreSQL database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return updated_rows
def hide_comment(id):
""" hide the comment by setting its hidden_at timestamp """
sql = """ UPDATE comments
SET hidden_at = LOCALTIMESTAMP
WHERE id = %s"""
conn = None
updated_rows = 0
try:
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
# create a new cursor
cur = conn.cursor()
# execute the UPDATE statement
cur.execute(sql, (id,))
# get the number of updated rows
updated_rows = cur.rowcount
# Commit the changes to the database
conn.commit()
print(updated_rows)
# Close communication with the PostgreSQL database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return updated_rows
def get_records():
# read connection parameters
params = config()
# GET THE CONNECTION OBJECT
conn = psycopg2.connect(**params)
# CREATE A CURSOR USING THE CONNECTION OBJECT
curr = conn.cursor()
# EXECUTE THE SQL QUERY: get all comments newer than now - 12 hours
query = "select comments.id,body,comments.hidden_at,flags_count,ancestry,commentable_type from comments left join comment_translations on comments.id=comment_translations.id where comments.created_at > now() - interval '12 hours';"
#will want to refine this query not to select records previously restored by a moderator
curr.execute(query)
# FETCH ALL THE ROWS FROM THE CURSOR
data = curr.fetchall()
rows=0
# PRINT THE RECORDS
for row in data:
commentid=row[0]
body=row[1]
print("comment id",commentid,"body ",body)
rows=rows+1
# moderate
response=openai.Moderation.create(
input=body,
)
print("is this bodytext to be moderated")
status = response["results"][0]["flagged"]
print(status)
scores = response["results"][0]["category_scores"]
print(scores)
flags=0
for index, (cat,score) in enumerate(scores.items()):
if score > thresh:
flags = flags + 2
print("category",cat," score",score)
if status :
#flagged status from moderation is true
print(response["results"])
hide_comment(commentid )
# need to add something here to notify moderator
# also need to make decisions about flag vs hide
else:
#loop through values and if above threshold - flag comment
print("no comments hidden")
# if flag score is more than 0 update flag field on comment id
if flags > 0 :
print("adding flag score ", flags, " to comment ",commentid)
update_flag(commentid, flags)
# CLOSE THE CONNECTION
conn.close()
return rows
if __name__ == '__main__':
# connect()
get_records()
# Update flag
flags = 0
print(now,"stopping moderation")
# print(output)
| [] |
2024-01-10 | Davidpazn/DJIA-Direction-Prediction | src~nlp_utils~topic_modelling.py | #!/usr/bin/env python3
# coding: utf-8
__author__ = "David Pacheco Aznar"
__email__ = "[email protected]"
# The aim of this script is to build a topic modeller using HDP and BERT.
from src.nlp_utils.preprocessing import install
# Data manipulation
import numpy as np
from collections import Counter
# sklearn
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import silhouette_score # get sense of topic separation
# umap
install('umap-learn')
import umap.umap_ as umap
# plotting imports
import matplotlib.pyplot as plt
# gensim
import gensim
from gensim import corpora
from gensim.utils import simple_preprocess
# Hierarchical Dirichlet Process model to infer best number of LDA clusters
from gensim.models import HdpModel
from gensim.models import CoherenceModel
from gensim.test.utils import common_corpus, common_dictionary
# ### Transformers with BERT
# install('sentence-transformers')
# from sentence_transformers import SentenceTransformer
# model_bert = SentenceTransformer('bert-base-nli-max-tokens')
# ##############################################################################
# ####################### TOPIC MODELLING FUNCTIONS ############################
# ##############################################################################
# ################################# HDP ########################################
def train_hdp(corpus):
id2word = corpora.Dictionary(corpus)
new_corpus = [id2word.doc2bow(text) for text in corpus]
hdp = HdpModel(corpus=new_corpus, id2word=id2word)
return hdp, new_corpus
# ############################# DOCUMENT TOPIC #################################
def get_document_topic_lda(model, corpus, k):
n_doc = len(corpus)
doc_topic_mapping = np.zeros((n_doc, k))
for i in range(n_doc):
for topic, prob in model.get_document_topics(corpus[i]):
doc_topic_mapping[i, topic] = prob
return doc_topic_mapping
def lda_main_topic(lda, corpus):
labels_lda = []
for line in corpus:
line_labels = sorted(lda.get_document_topics(line), key=lambda x: x[1], reverse=True)
top_topic = line_labels[0][0]
labels_lda.append(top_topic)
return labels_lda
# ############################## CLUSTERING ####################################
def predict_topics_with_kmeans(embeddings,num_topics):
kmeans_model = KMeans(num_topics)
kmeans_model.fit(embeddings)
topics_labels = kmeans_model.predict(embeddings)
return topics_labels
def reduce_umap(embedding):
reducer = umap.UMAP() #umap.UMAP()
embedding_umap = reducer.fit_transform( embedding )
return embedding_umap
def reduce_pca(embedding):
pca = PCA(n_components=2)
reduced = pca.fit_transform( embedding )
print( "pca explained_variance_ ",pca.explained_variance_)
print( "pca explained_variance_ratio_ ",pca.explained_variance_ratio_)
return reduced
def reduce_tsne(embedding):
tsne = TSNE(n_components=2)
reduced = tsne.fit_transform( embedding )
return reduced
# ############################## PLOTTING ######################################
def plot_embeddings(embedding, labels,title):
labels = np.array( labels )
distinct_labels = set( labels )
n = len(embedding)
counter = Counter(labels)
for i in range(len( distinct_labels )):
ratio = (counter[i] / n )* 100
cluster_label = f"cluster {i}: { round(ratio,2)}"
x = embedding[:, 0][labels == i]
y = embedding[:, 1][labels == i]
plt.plot(x, y, '.', alpha=0.4, label= cluster_label)
# plt.legend(title="Topic",loc = 'upper left', bbox_to_anchor=(1.01,1))
plt.title(title)
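# Minimal end-to-end sketch (commented out; assumes `docs` is a list of tokenized documents,
# e.g. produced with gensim's simple_preprocess -- the names below are illustrative):
# hdp, bow_corpus = train_hdp(docs)
# lda = hdp.suggested_lda_model() # convert the HDP model to an equivalent LDA model
# k = lda.num_topics
# doc_topics = get_document_topic_lda(lda, bow_corpus, k) # document-topic matrix
# labels = predict_topics_with_kmeans(doc_topics, k) # cluster documents by topic mixture
# plot_embeddings(reduce_umap(doc_topics), labels, "HDP/LDA topics (UMAP)")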
| [] |
2024-01-10 | jasonmassey/writegoodcomments | writegoodcomments.py | #!/usr/bin/env python3
import os
import sys
import argparse
import getpass
import openai
# Constants
CONFIG_FILE_PATH = os.path.expanduser("~/.writegoodcomments")
# Set up OpenAI API key
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Utility functions
def get_openai_api_key():
api_key = input("Enter your OpenAI API key: ")
with open(CONFIG_FILE_PATH, "w") as config_file:
config_file.write(f"OPENAI_API_KEY={api_key}\n")
def read_config():
if os.path.exists(CONFIG_FILE_PATH):
with open(CONFIG_FILE_PATH, "r") as config_file:
for line in config_file:
key, value = line.strip().split("=")
if key == "OPENAI_API_KEY":
return value
return None
def get_file_extension(file_path):
_, ext = os.path.splitext(file_path)
return ext.lower()
def generate_comments(code, signature):
# Use OpenAI API to generate comments
response = openai.Completion.create(
engine="davinci",
prompt=code,
max_tokens=100,
temperature=0.7
)
return signature + response.choices[0].text
def process_file(file_path, signature):
with open(file_path, "r") as file:
code = file.read()
comments = generate_comments(code, signature)
with open(file_path, "w") as file:
file.write(comments)
def main():
parser = argparse.ArgumentParser(description="Generate detailed comments for code files using OpenAI.")
parser.add_argument("-r", "--recursive", action="store_true", help="Recursively process code files.")
parser.add_argument("-changesig", type=str, help="Change the comment signature.")
parser.add_argument("files", nargs="*", help="List of code files to process.")
args = parser.parse_args()
if not args.files:
parser.print_help()
return
openai_api_key = read_config()
if openai_api_key is None:
get_openai_api_key()
openai_api_key = read_config()
openai.api_key = openai_api_key
signature = "j--"
if args.changesig:
signature = args.changesig
for file_path in args.files:
if os.path.isfile(file_path) and get_file_extension(file_path) in [".c", ".cpp", ".h", ".java", ".js"]:
process_file(file_path, signature)
if args.recursive:
for root, _, files in os.walk("."):
for file in files:
file_path = os.path.join(root, file)
if get_file_extension(file_path) in [".c", ".cpp", ".h", ".java", ".js"]:
process_file(file_path, signature)
if __name__ == "__main__":
main()
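# Example invocation (illustrative file names; assumes the OpenAI key is configured via
# the OPENAI_API_KEY environment variable or ~/.writegoodcomments):
# python writegoodcomments.py -r -changesig "me--" src/main.c src/util.cpp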
| [] |
2024-01-10 | Timevdo/CS505-Final-Project | congress~filter_transcripts.py | import json
import random
import string
import openai
from tqdm import tqdm
# filter transcripts to find transcripts that are debating a *specific* bill
# start by loading the transcripts
with open("./daterangegovinfo02.json") as f:
data = json.load(f)
# pick a random 1000 transcripts to work with
# seed the randomness with a fixed value so that we can reproduce the results
random.seed(0)
previous_run_transcripts = random.sample(data, 300)
random.seed(0)
transcripts = random.sample(data, 1000)
# check that the first 300 transcripts are the same in both lists
for i in range(300):
if previous_run_transcripts[i] != transcripts[i]:
print("ERROR: random samples are not equal")
print("previous_run_transcripts[i]: " + str(previous_run_transcripts[i]))
print("transcripts[i]: " + str(transcripts[i]))
print("i: " + str(i))
exit(1)
# fix whitespace in transcripts
# replace \n with newlines
for t in transcripts:
t['transcript'] = t['transcript'].replace("\\n", "\n")
# filter out transcripts that are less than 800 characters long (likely not a debate)
transcripts = [t for t in transcripts if len(t['transcript']) > 800]
# filter out transcripts that are less than 1500 characters long and contain the phrase
# "Congress has the power to enact this legislation pursuant to the following:"
# (ignore whitespace)
no_whitespace = {ord(c): None for c in string.whitespace}
transcripts = [t for t in transcripts if len(t['transcript']) > 1500 or
"Congress has the power to enact this legislation pursuant to the following:".translate(no_whitespace)
not in t['transcript'].translate(no_whitespace)]
# while True:
# print("\n" * 100)
# item = random.choice(data)
#
# # replace \n with newlines
# transcript = item['transcript'].replace("\\n", "\n")
# print(transcript)
# input("Press enter for another transcript, or ctrl+c to exit")
# ***********************************************************************************
# Now we ship these off to the GPT-3 instruct api to see if it's a debate about a bill
# we don't ask *which* bill @ this point, just if it's a debate about a bill
client = openai.OpenAI(api_key="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")
base_prompt = "This is an excerpt from a transcript from the United States Congress. Is this a debate about a bill? Answer \"TRUE\" or \"FALSE\".\n"
def get_answer(transcript, model):
response = client.chat.completions.create(
model=model,
messages=[
{
"role": "system",
"content": base_prompt
},
{"role": "user", "content": t['transcript']}
],
)
answer = response.choices[0].message.content
if answer != "TRUE" and answer != "FALSE":
print("ERROR: unexpected answer from GPT: " + answer)
return None
return answer == "TRUE"
continue_from = 300
for t in tqdm(transcripts[continue_from:]):
try:
answer = get_answer(t['transcript'], "gpt-3.5-turbo-16k")
except:
try:
answer = get_answer(t['transcript'], "gpt-4-1106-preview")
except:
answer = None
if answer is None:
with open("debate_transcripts_unknown.json", "a") as f:
json.dump(t, f)
f.write("\n")
if answer:
# print(t['transcript'])
# input("Press enter for another transcript, or ctrl+c to exit")
with open("debate_transcripts.json", "a") as f:
json.dump(t, f)
f.write("\n")
| [
"This is an excerpt from a transcript from the United States Congress. Is this a debate about a bill? Answer \"TRUE\" or \"FALSE\".\n",
"transcript"
] |
2024-01-10 | nagarajRPoojari/RAG-LLM | src~RagLLM~pipeline.py | from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Pinecone
import pinecone
import os
os.environ['OPENAI_API_KEY']="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"
os.environ['PINECONE_ENV']="us-west4-gcp-free"
os.environ['PINECONE_API_KEY']="6b699f4f-5b1d-471a-adbe-918747981c1b"
class Pinecone_client:
def __init__(self,embeddings):
PINECONE_API_KEY=os.environ['PINECONE_API_KEY']
PINECONE_ENV=os.environ['PINECONE_ENV']
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENV)
index_name = "neet-bot"
if index_name not in pinecone.list_indexes():
pinecone.create_index(name=index_name, metric="cosine", dimension=768) # Adjust the dimension as per your document representation
self.index = pinecone.Index(index_name)
self.embeddings=embeddings
def get_retriever(self):
docsearch=Pinecone(self.index, self.embeddings.embed_query, "text")
return docsearch
class LLM:
def __init__(self):
self.embeddings=HuggingFaceEmbeddings()
api_key=os.environ['OPENAI_API_KEY']
self.model=ChatOpenAI(temperature=0,model_name='gpt-3.5-turbo',api_key=api_key)
def set_prompt(self,prompt=None):
if not prompt:
prompt="You are a bot that answers questions related to NEET (National Eligibility Cum Entrence Test) Biology syllabus based on context provided below."
prompt_template= prompt + """
{context}
question : {question}
"""
self.PROMPT=PromptTemplate(
template=prompt_template,
input_variables=['context','question']
)
return self.PROMPT
class RagBot:
def __init__(self):
self.llm=LLM()
self.pinecone_client=Pinecone_client(self.llm.embeddings)
self.retriever=self.pinecone_client.get_retriever()
self.prompt=self.llm.set_prompt()
self.bot=RetrievalQA.from_chain_type(
llm=self.llm.model,
chain_type='stuff',
retriever=self.retriever.as_retriever(search_type='mmr'),
chain_type_kwargs={'prompt':self.prompt,},
return_source_documents=True
)
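# Minimal usage sketch (commented out; assumes the keys above are valid and the "neet-bot"
# Pinecone index has already been populated with embedded documents):
# bot = RagBot()
# result = bot.bot({"query": "What is the function of mitochondria?"})
# print(result["result"]) # answer text
# print(result["source_documents"]) # retrieved context chunks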
| [
"You are a bot that answers questions related to NEET (National Eligibility Cum Entrence Test) Biology syllabus based on context provided below.\n {context}\n question : {question}\n ",
"You are a bot that answers questions related to NEET (National Eligibility Cum Entrence Test) Biology syllabus based on context provided below."
] |
2024-01-10 | tig0826/dotfiles | Alfred~Alfred.alfredpreferences~workflows~user.workflow.45FC2CB3-0DBB-45D8-A7D1-E88085A094D0~src~text_completion.py | """This module contains the text completion API."""
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "libs"))
import openai
openai.api_key = os.getenv("api_key")
__model = os.getenv("model") or "text-davinci-003"
__temperature = float(os.getenv("temperature") or 0.0)
__max_tokens = int(os.getenv("max_tokens") or 50)
__top_p = int(os.getenv("top_p") or 1)
__frequency_penalty = float(os.getenv("frequency_penalty") or 0.0)
__presence_penalty = float(os.getenv("presence_penalty") or 0.0)
def get_query() -> str:
"""Join the arguments into a query string."""
return " ".join(sys.argv[1:])
def prompt_from_query(query: str) -> str:
"""Creates a suitable prompt for the OpenAI API."""
return f"Q: {query}\nA:"
def stdout_write(output_string: str) -> None:
"""Writes the response to stdout."""
output_string = "..." if output_string == "" else output_string
response_dict = {
"variables": {
"request": f"{get_query()}",
},
"items": [
{
"uid": "null",
"type": "default",
"title": output_string,
"subtitle": "⇪, ⌃, ⌥ or ⌘ for options",
"arg": output_string,
"autocomplete": output_string,
"icon": {"path": "./icon.png"},
}
],
}
sys.stdout.write(json.dumps(response_dict))
def env_value_checks() -> None:
"""Checks the environment variables for invalid values."""
if __temperature < 0 or __temperature > 2.0:
stdout_write(
f"🚨 'Temperature' must be ≤ 2.0 and ≥ 0. But you have set it to {__temperature}."
)
sys.exit(0)
if __model == "text-davinci-003" and __max_tokens > 4096:
stdout_write("🚨 'Maximum tokens' must be ≤ 4096 for 'Davinci'.")
sys.exit(0)
if (
__model in ["text-ada-001", "text-babbage-001", "text-curie-001"]
and __max_tokens > 2048
):
model_name = __model.split("-")[1].capitalize()
stdout_write(f"🚨 'Maximum tokens' must be ≤ 4096 for '{model_name}'.")
sys.exit(0)
if __frequency_penalty < -2.0 or __frequency_penalty > 2.0:
stdout_write("🚨 'Frequency penalty' must be between -2.0 and 2.0.")
sys.exit(0)
if __presence_penalty < -2.0 or __presence_penalty > 2.0:
stdout_write("🚨 'Presence penalty' must be between -2.0 and 2.0.")
sys.exit(0)
def make_request(
model: str,
prompt: str,
temperature: float,
max_tokens: int,
top_p: int,
frequency_penalty: float,
presence_penalty: float,
) -> str:
"""Makes the request to the OpenAI API."""
try:
return (
openai.Completion.create(
model=model,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=["\n", "<|endoftext|>"],
)
.choices[0]
.text
)
except openai.error.AuthenticationError:
return "🚨 There seems to be something wrong! Please check your API key."
except openai.error.InvalidRequestError:
return "🚨 Hmmm... Something is wrong with your request. Try again later."
except openai.error.ServiceUnavailableError:
return "🚨 Oh no! The server is overloaded or not ready yet."
except openai.error.APIError:
return "🚨 D'oh! The server had an error while processing your request."
except openai.error.APIConnectionError:
return "🚨 There is something fishy with your internet connection. Check your network settings."
except openai.error.RateLimitError:
return "🚨 You have reached the rate limit. Check your settings in your OpenAI dashboard."
except openai.error.Timeout:
return "🚨 The request timed out. Try again later."
env_value_checks()
response = make_request(
__model,
prompt_from_query(get_query()),
__temperature,
__max_tokens,
__top_p,
__frequency_penalty,
__presence_penalty,
)
stdout_write(response)
| [] |
2024-01-10 | MIT-REALM/dcrl | keras_agent~common~cmd_util.py | # Inspired from OpenAI Baselines
import gym
from keras_agent.common.vec_env.subproc_env_vec import SubprocVecEnv
from keras_agent.common import set_global_seeds
def make_gym_env(env_id, num_env=2, seed=123, wrapper_kwargs=None, start_index=0):
"""
Create a wrapped, SubprocVecEnv for Gym Environments.
"""
if wrapper_kwargs is None:
wrapper_kwargs = {}
def make_env(rank): # pylint: disable=C0111
def _thunk():
env = gym.make(env_id)
env.seed(seed + rank)
return env
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
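# Minimal usage sketch (commented out; assumes a registered Gym environment id such as "CartPole-v1"):
# vec_env = make_gym_env("CartPole-v1", num_env=4, seed=0)
# obs = vec_env.reset() # stacked observations, one row per worker
# vec_env.step_async([vec_env.action_space.sample() for _ in range(4)])
# obs, rewards, dones, infos = vec_env.step_wait()
# vec_env.close()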
| [] |
2024-01-10 | MIT-REALM/dcrl | keras_agent~common~vec_env~subproc_env_vec.py | # Inspired from OpenAI Baselines
import numpy as np
from multiprocessing import Process, Pipe
from keras_agent.common.vec_env import VecEnv, CloudpickleWrapper
from keras_agent.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'seed':
val = env.seed(data)
remote.send(val)
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
raise NotImplementedError('Render is not implemented for Synchronous Environment')
def seed(self, i):
rank = i
for remote in self.remotes:
remote.send(('seed', rank))
rank += 1
| [] |
2024-01-10 | vstark1327/CEUR-WS-Event-Series--SS23--Grp-2 | main~matching~proceedings.py | import os
import openai
import json
import re
import random
class OpenAIProceedingsMatcher:
def __init__(self):
self.events = None
self.series = None
self.proceedings = None
self.events_with_title_and_series = []
self.event_series_with_title = []
self.events_with_title_and_series_bijective = []
self.deduced_event_series = []
self.proceedings_with_title = []
self.proceedings_sample = []
self.extracted_texts = []
openai.api_key = os.environ.get("OPENAI_API_KEY")
self.read_files()
self.preprocess()
self.generate_prompt()
self.request_save_response()
self.extract_series_titles()
def read_files(self):
resources_path = os.path.abspath("resources")
events = open(
os.path.join(resources_path, "events.json"), "r", encoding="utf-8"
)
self.events = json.load(events)
events.close()
series = open(
os.path.join(resources_path, "event_series.json"), "r", encoding="utf-8"
)
self.series = json.load(series)
series.close()
proceedings = open(
os.path.join(resources_path, "proceedings.json"), "r", encoding="utf-8"
)
self.proceedings = json.load(proceedings)
proceedings.close()
def preprocess(self):
for binding in self.series["results"]["bindings"]:
if "title" in binding:
self.event_series_with_title.append(binding)
for binding in self.events["results"]["bindings"]:
if "title" in binding and "series" in binding:
self.events_with_title_and_series.append(binding)
event_series_dummy = [event["series"] for event in self.event_series_with_title]
self.events_with_title_and_series_bijective = [
event
for event in self.events_with_title_and_series
if event["series"] in event_series_dummy
]
for binding in self.proceedings["results"]["bindings"]:
if "proceedingTitle" in binding:
self.proceedings_with_title.append(binding)
self.proceedings_sample = random.sample(self.proceedings_with_title, 25)
def generate_prompt(self):
self.conversation = [
{"role": "system", "content": "You are a human"},
{
"role": "user",
"content": 'In wikidata, there are about 3400 entries which are interesting to me. Lets call these as "events". Additionally, there are different and fewer group of entries, lets call these as "event series". Almost all events are a part of event series. I will provide real examples later on, but for context, we can draw similarities to this example: if each star wars movies are "events", then the star wars itself is the "event series".',
},
{
"role": "user",
"content": 'Moreover, there are entities called "proceedings". "proceedings" are collection of scientific papers which are presented in the earlier mentioned "events". It is known that every proceeding comes from an event, or multiple events can publish their proceedings jointly.',
},
{
"role": "user",
"content": "In wikidata, the property connecting the events to event series are missing, and my task is to deduct from the title of the proceedings which event series does this event belong to. For humans it is an easy task for sure, but noone wants to edit thousands of entries by hand. This is where you step in.",
},
{
"role": "user",
"content": "I want you to deduct which event series does the following proceedings belong to. To help you out, i will provide titles of some random events, and their corresponding event series to help you out with the pattern recognition. Then i will provide some sample of proceedings title for you to find out the event series for these.",
},
]
# feed all events & corresponding event series into conversation
for count, item in enumerate(
self.events_with_title_and_series_bijective, start=1
):
self.conversation.append(
{
"role": "user",
"content": "Event "
+ str(count)
+ " is named '"
+ item["title"]["value"]
+ "'",
}
)
self.conversation.append(
{
"role": "assistant",
"content": "The event series for Event "
+ str(count)
+ " is '"
+ item["seriesLabel"]["value"]
+ "'",
}
)
def request_save_response(self):
# feed test dataset titles
for proceeding in self.proceedings_sample:
self.conversation.append(
{
"role": "user",
"content": "The title of 'proceeding' for you to find event series is "
+ ""
+ proceeding["proceedingTitle"]["value"]
+ "",
}
)
# Send the conversation to ChatGPT
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=self.conversation,
stop=None,
temperature=0.7,
)
# Extract the deduced event series from the response
self.deduced_event_series.append(
response["choices"][0]["message"]["content"]
)
# Add the assistant's response to the conversation history
self.conversation.append(
{
"role": "assistant",
"content": response["choices"][0]["message"]["content"],
}
)
resources_path = os.path.abspath("results")
file1 = open(
os.path.join(
resources_path, "proceedings_response_" + response["id"] + ".json"
),
"w",
encoding="utf-8",
)
json.dump(self.deduced_event_series, file1, indent=6)
file1.close()
file2 = open(
os.path.join(
resources_path,
"proceedings_whole_conversation_" + response["id"] + ".json",
),
"w",
encoding="utf-8",
)
json.dump(self.conversation, file2, indent=6)
file2.close()
def extract_series_titles(self):
pattern = "'(.*?)'"
self.extracted_texts = [
re.search(pattern, s).group(1) if re.search(pattern, s) else ""
for s in self.deduced_event_series
]
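# Minimal usage sketch (commented out; assumes OPENAI_API_KEY is set and the resources/
# events.json, event_series.json and proceedings.json files are present):
# matcher = OpenAIProceedingsMatcher() # the constructor runs the whole pipeline
# print(matcher.extracted_texts) # deduced event series titles for the sampled proceedings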
| [
"The event series for Event ",
"proceedingTitle",
" is named '",
"seriesLabel",
"Event ",
" is '",
"In wikidata, the property connecting the events to event series are missing, and my task is to deduct from the title of the proceedings which event series does this event belong to. For humans it is an easy task for sure, but noone wants to edit thousands of entries by hand. This is where you step in.",
"You are a human",
"In wikidata, there are about 3400 entries which are interesting to me. Lets call these as \"events\". Additionally, there are different and fewer group of entries, lets call these as \"event series\". Almost all events are a part of event series. I will provide real examples later on, but for context, we can draw similarities to this example: if each star wars movies are \"events\", then the star wars itself is the \"event series\".",
"Moreover, there are entities called \"proceedings\". \"proceedings\" are collection of scientific papers which are presented in the earlier mentioned \"events\". It is known that every proceeding comes from an event, or multiple events can publish their proceedings jointly.",
"I want you to deduct which event series does the following proceedings belong to. To help you out, i will provide titles of some random events, and their corresponding event series to help you out with the pattern recognition. Then i will provide some sample of proceedings title for you to find out the event series for these.",
"content",
"The title of 'proceeding' for you to find event series is "
] |
2024-01-10 | vstark1327/CEUR-WS-Event-Series--SS23--Grp-2 | main~matching~event_series.py | import os
import openai
import json
from sklearn.model_selection import train_test_split
import copy
import re
class OpenAISeriesMatcher:
def __init__(self):
self.events = None
self.series = None
self.events_with_title_and_series = []
self.event_series_with_title = []
self.events_with_title_and_series_bijective = []
self.train = []
self.test = []
self.test_dropped = []
self.deduced_event_series = []
self.extracted_texts = []
openai.api_key = os.environ.get("OPENAI_API_KEY")
self.read_files()
self.preprocess()
self.generate_prompt()
self.request_save_response()
self.extract_series_titles()
def read_files(self):
resources_path = os.path.abspath("resources")
events = open(
os.path.join(resources_path, "events.json"), "r", encoding="utf-8"
)
self.events = json.load(events)
events.close()
series = open(
os.path.join(resources_path, "event_series.json"), "r", encoding="utf-8"
)
self.series = json.load(series)
series.close()
def preprocess(self):
for binding in self.series["results"]["bindings"]:
if "title" in binding:
self.event_series_with_title.append(binding)
for binding in self.events["results"]["bindings"]:
if "title" in binding and "series" in binding:
self.events_with_title_and_series.append(binding)
event_series_dummy = [event["series"] for event in self.event_series_with_title]
self.events_with_title_and_series_bijective = [
event
for event in self.events_with_title_and_series
if event["series"] in event_series_dummy
]
self.train, self.test = train_test_split(
self.events_with_title_and_series_bijective, test_size=0.1
)
self.test_dropped = copy.deepcopy(self.test)
for item in self.test_dropped:
if "series" in item:
del item["series"]
if "seriesLabel" in item:
del item["seriesLabel"]
if "ordinal" in item:
del item["ordinal"]
def generate_prompt(self):
self.conversation = [
{"role": "system", "content": "You are a human"},
{
"role": "user",
"content": 'In wikidata, there are about 3400 entries which are interesting to me. Lets call these as "events". Additionally, there are different and fewer group of entries, lets call these as "event series". Almost all events are a part of event series. I will provide real examples later on, but for context, we can draw similarities to this example: if each star wars movies are "events", then the star wars itself is the "event series".',
},
{
"role": "user",
"content": "In wikidata, the property connecting the events to event series are missing, and my task is to deduct from the title of the event which event series does this event belong to. For humans it is an easy task for sure, but noone wants to edit thousands of entries by hand. This is where you step in.",
},
{
"role": "user",
"content": "I want you to deduct which event series does the following events belong to. To help you out, i will provide titles of some random events, and their corresponding event series to help you out with the pattern recognition. Then i will provide more events for you to find out the event series for these.",
},
]
# feed training events & corresponding event series into conversation
for count, item in enumerate(self.train, start=1):
self.conversation.append(
{
"role": "user",
"content": "Event "
+ str(count)
+ " is named '"
+ item["title"]["value"]
+ "'",
}
)
self.conversation.append(
{
"role": "assistant",
"content": "The event series for Event "
+ str(count)
+ " is '"
+ item["seriesLabel"]["value"]
+ "'",
}
)
def request_save_response(self):
# feed test dataset titles
for event in self.test_dropped:
self.conversation.append(
{
"role": "user",
"content": "The event for you to find event series is "
+ event["title"]["value"],
}
)
# Send the conversation to ChatGPT
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=self.conversation,
stop=None,
temperature=0.7,
)
# Extract the deduced event series from the response
self.deduced_event_series.append(
response["choices"][0]["message"]["content"]
)
# Add the assistant's response to the conversation history
self.conversation.append(
{
"role": "assistant",
"content": response["choices"][0]["message"]["content"],
}
)
resources_path = os.path.abspath("results")
file1 = open(
os.path.join(
resources_path, "event_series_response_" + response["id"] + ".json"
),
"w",
encoding="utf-8",
)
json.dump(self.deduced_event_series, file1, indent=6)
file1.close()
file2 = open(
os.path.join(
resources_path, "whole_conversation_" + response["id"] + ".json"
),
"w",
encoding="utf-8",
)
json.dump(self.conversation, file2, indent=6)
file2.close()
def extract_series_titles(self):
pattern = "'(.*?)'"
self.extracted_texts = [
re.search(pattern, s).group(1) if re.search(pattern, s) else ""
for s in self.deduced_event_series
]
def compare_results(self):
sum = 0
for line, org in zip(self.extracted_texts, self.test):
if line == org["seriesLabel"]["value"]:
sum += 1
print("accuracy: ", sum / len(self.test))
| [
"In wikidata, the property connecting the events to event series are missing, and my task is to deduct from the title of the event which event series does this event belong to. For humans it is an easy task for sure, but noone wants to edit thousands of entries by hand. This is where you step in.",
"The event series for Event ",
" is named '",
"I want you to deduct which event series does the following events belong to. To help you out, i will provide titles of some random events, and their corresponding event series to help you out with the pattern recognition. Then i will provide more events for you to find out the event series for these.",
"seriesLabel",
"Event ",
"In wikidata, there are about 3400 entries which are interesting to me. Lets call these as \"events\". Additionally, there are different and fewer group of entries, lets call these as \"event series\". Almost all events are a part of event series. I will provide real examples later on, but for context, we can draw similarities to this example: if each star wars movies are \"events\", then the star wars itself is the \"event series\".",
" is '",
"You are a human",
"The event for you to find event series is ",
"content"
] |
2024-01-10 | AZURE-ARC-0/AIRobin3 | addons~full_website_search.py | import requests
from bs4 import BeautifulSoup
description = "Used to answer questions about an entire website, not just a single page. The website is scraped and vectorized, and then the 3 most similar chunks of text are retrieved."
parameters = {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "The URL of the website to visit.",
},
"query": {
"type": "string",
"description": "The query to retrieve the most similar chunks of text from a vectorized representation of the website.",
},
"include_links": {
"type": "boolean",
"description": "Whether or not to include links in the scraped data.",
"default": True
},
"include_images": {
"type": "boolean",
"description": "Whether or not to include images in the scraped data.",
"default": True
},
},
"required": ["url", "query"],
}
import os
import joblib
import openai
import requests
from bs4 import BeautifulSoup
import numpy as np
import faiss
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import re
import nltk
from tenacity import retry, wait_random_exponential, stop_after_attempt
import config
# Set the OpenAI API key
openai.api_key = config.OPENAI_API_KEY
visited_links = set()
all_links = set()
def get_website_name(url):
return url.replace('https://', '').replace('http://', '').replace('www.', '').split('/')[0]
def visit_website(url, include_links=True, include_images=True):
print(f'Visiting {url}\n')
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3")
options.add_argument("--disable-dev-shm-usage")
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html.parser')
for script in soup(["script", "style"]):
script.decompose()
data = ''
if include_images:
images = [img.get('src') for img in soup.find_all('img') if img.get('src') and img.get('src').startswith('http')]
data += '\nImages: ' + '\n'.join(images) + '\n'
text = ' '.join(soup.stripped_strings)
data += '\nText: ' + text
if include_links:
links = [link.get('href') for link in soup.find_all('a') if link.get('href') and link.get('href').startswith('http')]
for link in links:
if link not in all_links:
all_links.add(link)
if link not in visited_links and get_website_name(url) in link:
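                    # Skip links that point to media or document downloads instead of HTML pages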
if not re.search(r'\.(jpg|jpeg|png|gif|svg|mp4|mp3|avi|wav|mov|flv|wmv|webm|pdf|doc|docx|xls|xlsx|ppt|pptx|zip|rar|7z|gz|tar|iso|exe|dmg|apk|csv|tsv|json|xml|txt|rtf|odt|ods|odp|odg|odf|odb|odc|odm|ott|ots|otp|otg|otf|oti|oth|sxw|stw|sxc|stc|sxi|sti|sxd|std)', link):
visited_links.add(link)
link_data, link_url = visit_website(link)
data += link_data
driver.quit()
return data, url
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text: str, model="text-embedding-ada-002") -> list[float]:
return openai.Embedding.create(input=[text], model=model)["data"][0]["embedding"]
def retrieve_content(query: str, embeddings, chunks, top_n=3):
query_embedding = get_embedding(query)
query_embedding_np = np.array(query_embedding).astype('float32').reshape(1, -1)
embeddings_np = np.array(embeddings).astype('float32')
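    # L2-normalize both sides so the inner-product index effectively ranks by cosine similarity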
faiss.normalize_L2(embeddings_np)
faiss.normalize_L2(query_embedding_np)
index = faiss.IndexFlatIP(len(query_embedding_np[0]))
index.add(embeddings_np)
D, I = index.search(query_embedding_np, top_n)
most_similar_indices = I[0]
scores = D[0]
results = []
for idx, score in zip(most_similar_indices, scores):
results.append((chunks[idx], score, idx))
return results
def process_website(website_url, include_links=True, include_images=True):
website_name = get_website_name(website_url)
try:
scraped_data, url = joblib.load(f'scraped/{website_name}_scraped_data.joblib')
except FileNotFoundError:
scraped_data, url = visit_website(website_url, include_links, include_images)
scraped_data += '\nLinks: ' + '\n'.join(all_links) + '\n'
# make sure the scraped folder exists
if not os.path.exists('scraped'):
os.makedirs('scraped')
joblib.dump((scraped_data, url), f'scraped/{website_name}_scraped_data.joblib')
try:
embeddings = joblib.load(f'scraped/{website_name}_embeddings.joblib')
chunks = joblib.load(f'scraped/{website_name}_chunks.joblib')
except FileNotFoundError:
embeddings = []
chunks = []
content = scraped_data
sentence_chunks = nltk.sent_tokenize(content)
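        # Group sentences five at a time into chunks, keeping the source URL alongside each chunk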
for i, sentence in enumerate(sentence_chunks):
if i % 5 == 0:
chunks.append({"text": sentence, "url": url})
else:
chunks[-1]["text"] += '\n' + sentence
for chunk in chunks:
embeddings.append(get_embedding(chunk["text"]))
joblib.dump(embeddings, f'scraped/{website_name}_embeddings.joblib')
joblib.dump(chunks, f'scraped/{website_name}_chunks.joblib')
return embeddings, chunks
def full_website_search(url, query, include_links=True, include_images=True):
results = []
website_url = url
embeddings, chunks = process_website(website_url, include_links, include_images)
results = retrieve_content(query, embeddings, chunks)
for i, (result, score, index) in enumerate(results):
results[i] = f"Result {i+1} (Score: {score}):\n{result['text']}\nSource: {result['url']}\nIndex: {index}"
return '\n\n'.join(results) + '\n\n' | [] |
2024-01-10 | nrl-ai/pautobot | pautobot~engine~llm_factory.py | from langchain.llms import GPT4All
from pautobot.utils import download_model
class LLMFactory:
"""Factory for instantiating LLMs."""
@staticmethod
def create_llm(
model_type, model_path, model_n_ctx, streaming=False, verbose=False
):
# Download the model
download_model(model_type, model_path)
# Prepare the LLM
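        # Only the GPT4All model type is supported here; any other value raises a ValueError below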
if model_type == "GPT4All":
return GPT4All(
model=model_path,
n_ctx=model_n_ctx,
backend="gptj",
streaming=streaming,
verbose=verbose,
)
else:
raise ValueError(f"Invalid model type: {model_type}")
| [] |
2024-01-10 | nrl-ai/pautobot | pautobot~engine~chatbot_factory.py | from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
class ChatbotFactory:
"""Factory for instantiating chatbots."""
@staticmethod
def create_chatbot(
llm,
):
template = """Assistant is a large language model train by human.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
{history}
Human: {human_input}
Assistant:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"], template=template
)
chatbot_instance = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
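            # A window memory of k=2 keeps only the last two exchanges in the prompt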
memory=ConversationBufferWindowMemory(k=2),
)
return chatbot_instance
| [
"Assistant is a large language model train by human.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\n{history}\nHuman: {human_input}\nAssistant:",
"human_input"
] |
2024-01-10 | nrl-ai/pautobot | pautobot~engine~qa_factory.py | from chromadb.config import Settings
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from pautobot.engine.bot_context import BotContext
class QAFactory:
"""Factory for instantiating QAs."""
@staticmethod
def create_qa(
context: BotContext,
llm,
):
chroma_settings = Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=context.search_db_directory,
anonymized_telemetry=False,
)
embeddings = HuggingFaceEmbeddings(
model_name=context.embeddings_model_name
)
database = Chroma(
persist_directory=context.search_db_directory,
embedding_function=embeddings,
client_settings=chroma_settings,
)
retriever = database.as_retriever(search_kwargs={"k": 4})
qa_instance = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
)
return qa_instance
| [] |
2024-01-10 | alexbache/transcript-finetune | open_ai_functions.py | import openai
import os
from datetime import datetime
# set your openai api key using os environment variables
if openai.api_key == "":
# try except block to check if openai key has been set
try:
openai.api_key = os.getenv["OPENAI_API_KEY"]
except:
print("Please set your openai api key in the environment variables")
exit()
# functions for interacting with openai api
def upload_to_openai(filename):
openai.File.create(
file=open(filename, "rb"),
purpose='fine-tune'
)
# get all files that you have uploaded to openai
def get_all_files():
files = openai.File.list()
for file in files['data']:
# print(file.id, file.filename)
print(file)
# get all fine-tuned models that you have created
def get_all_finetunes():
# get all fine-tuned models
finetunes = openai.FineTune.list()['data']
for ft in finetunes:
print(ft['fine_tuned_model'])
# create a fine-tuned model
def fine_tune_model(training_file, training_model = "davinci", custom_name = "my-custom-model"):
openai.FineTune.create(training_file = training_file, model = training_model, suffix = custom_name)
def get_finetune_status():
response = openai.FineTune.list()
data = response['data']
for item in data:
dt_created = datetime.fromtimestamp(item['created_at'])
dt_updated = datetime.fromtimestamp(item['updated_at'])
time_taken = dt_updated - dt_created
print(item['object'], item['id'], item['status'], item['model'],item['fine_tuned_model'], dt_created,dt_updated, time_taken)
return | [] |
2024-01-10 | JacobxChoi/ChatBot | convo.py | import openai
from emora_stdm import DialogueFlow
import Macros
#global
from globalState import globalState
#knowLeague
from KnowLeague.knowsLeague import knowsLeague
from KnowLeague.advanced import advanced
from KnowLeague.casual import casual
#doesnKnowLeague
from doesntKnowLeagueEsports.doesntKnowLeague import doesntKnowLeague
from doesntKnowLeagueEsports.IntroduceLeague import IntroduceLeague
from doesntKnowLeagueEsports.laneInfo import laneInfo
#macros
from Macros import MacroEsportsOrLeague, UserInputChampion, MacroGetName, MacroGetOldName, \
    MacroGetNewName, MacroPushName, favRegion, MacroGPTJSON, getFavGame, MacroNLG, getReason, getActivityWithFriends, \
    PositiveAgreement, NegativeAgreement, MacroGoalAnalogy, getRandomGame, getSportsEvent, MacroEsportAttitudeResponse, \
    MacroGPTHAIKU, MacroFunTripError, getChampionRecommendReason, GetPlayerActivity
#imports babel conversation
import babel
from babel import babel
#convo.py imports
import pickle
import os
#knowsLeague
casual, edg, keria = casual()
advanced = advanced()
favoriteTeam, favoriteRegion = knowsLeague()
babel = babel()
#doesntKnowLeague
doesntKnowLeague, items, base, laneInfo, IntroduceLeague, IntroduceGame, IntroduceChampions, IntroduceEsports, IntroduceObjectives, ChampionRoles, SpecificTeams, SpecificPlayers, RecommendChampions, PopularChampions, ChampionTypes, ChampionRoles, StartPlaying, StartWatching = doesntKnowLeague()
#global transition
globalState = globalState()
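# Persist non-internal dialogue variables between runs so returning users are recognized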
def save(df: DialogueFlow, varfile: str):
df.run()
d = {k: v for k, v in df.vars().items() if not k.startswith('_')}
pickle.dump(d, open(varfile, 'wb'))
def load(df: DialogueFlow, varfile: str):
# has conversed before
if os.path.isfile('resources/visits.pkl'):
d = pickle.load(open(varfile, 'rb'))
df.vars().update(d)
df.run()
# df.run(debugging=True)
save(df, varfile)
# first time conversing
else:
df.run()
# df.run(debugging=True)
save(df, varfile)
# This is the welcoming transition
transitions = {
'state': 'start',
##Welcoming section
'`Hi, this is LoLa, your personal chatbot for LoL esports dialogue. Could you tell me your name and a bit about yourself?`': {
'#GET_NAME_GPT #GET_NAME': {
'#IF(#GET_NEWNAME) `Nice to meet you,` #NAME #PlayerActivity': 'DIVERGE',
'#IF(#GET_OLDNAME) `Welcome back!` #NAME `!`': 'DIVERGE',
'error': {
'`Nice to meet you!`': 'DIVERGE'
}
}
},
}
# This transition distributes the users to different branches of transitions based on their acquistion levels
transitionDiverging = {
'state': 'DIVERGE',
'`Do you keep up with League of Legends esports? What do you think about it? `': {
'[#AgreementChecker]': {
'#IF(#POSITIVE_AGREEMENT) `Nice.`': 'favPlayer',
'#IF(#NEGATIVE_AGREEMENT) `That\'s fine.`': 'doesntKnowLeagueEsports'
},
'error': {
'`Sorry, I didn\'t quite understand. Do you keep up with league esports?`': 'agreeOrNot'
}
}
}
macros = {
'LEAGUE': MacroEsportsOrLeague(),
'UserChamp': UserInputChampion(),
'NAME': MacroPushName(),
'GET_NAME': MacroGetName(),
'GET_NEWNAME': MacroGetNewName(),
'GET_OLDNAME': MacroGetOldName(),
'FAV_REGION': favRegion(),
'FAV_GAMETYPE':MacroGPTJSON(
'What is the game user mentioned, what is the type of the game. Give an example of the other game in the category and give one sentence answer why people love the game speaker mentioned starting with "it offers"',
{'GameName': 'Legend of Zelda', 'GameType': 'Adventure game', 'OtherGame': 'Xenoblade Chronicles', 'WhyInterest': 'They offer a unique and immersive gameplay experience that allows players to express their creativity, engage in friendly competition, and form lasting social connections.'},
{'GameName': 'N/A', 'GameType': 'N/A', 'OtherGame': 'N/A', 'WhyInterest': 'N/A'}
),
'ACTIVITY_WITH_FRIENDS':MacroGPTJSON(
'What does the activity the speaker typically do with friends, the activity stored should start with an verb',
{'WithFriendActivities': 'go hiking'},
{'WithFriendActivities': 'N/A'}
),
'GET_FAV_GAME': MacroNLG(getFavGame),
'GET_REASON_FAV_GAME': MacroNLG(getReason),
'GET_ACTIVITY_FRIEND': MacroNLG(getActivityWithFriends),
'SportEvents': MacroGPTJSON(
'What is the sports event user mentioned, if the user doesn\'t like any sports event, return no "',
{'SportEvent': 'NBA'},
{'SportEvent': 'N/A'}
),
'GET_SportsEvent': MacroNLG(getSportsEvent),
'POSITIVE_AGREEMENT': MacroNLG(PositiveAgreement),
'NEGATIVE_AGREEMENT': MacroNLG(NegativeAgreement),
'AgreementChecker': MacroGPTJSON(
'How does the speaker response to the yes or no question, give yes if user answers "yes", or shows interest , and no if user answers with "no" or is not interested ',
{'Agreement': 'yes'},
{'Agreement': 'no'}
),
'GET_NAME_GPT': MacroGPTJSON(
'What is the user\'s name? And what do they enjoy doing? If the user doesn\'t mention what they enjoy, just give a random habit ',
{'FIRSTNAME': 'Jacob', 'ROLE': 'being a student'},
{'FIRSTNAME': 'NA', 'ROLE': 'being a human'}
),
# 'SET_NAME_GPT':
'ESportAttitudeChecker': MacroGPTJSON(
'What is the speakers\'s attitude toward esport events. Attitude options are LookingForward, Excitement, Indifference, Unwilling, Open-mindednness',
{'EsportAttitude': 'LookingForward'},
),
'ESportAttitudeResponse': MacroEsportAttitudeResponse(),
'GameGoalAnalogy': MacroGoalAnalogy(),
#for advanced.py
'RANDGAME' : getRandomGame(),
#testing global
'GET_HAIKU': MacroGPTHAIKU(
'Write the user a haiku in the following format:',
{'HAIKU':'love between us is - speech and breath. loving you is - a long river running.'},
{'HAIKU':'NA'}
),
'FunTripError': MacroFunTripError(),
'PlayerActivity': GetPlayerActivity()
}
df = DialogueFlow('start', end_state='end')
#ontology
df.knowledge_base().load_json_file('resources/teams.json')
# df.knowledge_base().load_json_file('resources/gameType.json')
#funny diversions
funny_diversions = {
'[touch grass]': "`You think you\'re sooo funny. You are, though :). Anyways, you were saying?` (3.0)",
'[your {mom, mother}]': "`Yeah, I thought we had an earthquake, but she was just hungry. Anyways, you were saying?` (3.0)",
'[joke]': "pp"
}
df.load_update_rules(funny_diversions)
#doesntKnowLeague transitions
df.load_transitions(doesntKnowLeague)
df.load_transitions(transitionDiverging)
df.load_transitions(transitions)
df.load_transitions(IntroduceLeague)
df.load_transitions(laneInfo)
df.load_transitions(IntroduceGame)
df.load_transitions(IntroduceChampions)
df.load_transitions(IntroduceEsports)
df.load_transitions(IntroduceObjectives)
df.load_transitions(ChampionRoles)
df.load_transitions(SpecificTeams)
df.load_transitions(SpecificPlayers)
df.load_transitions(RecommendChampions)
df.load_transitions(PopularChampions)
df.load_transitions(ChampionRoles)
df.load_transitions(ChampionTypes)
df.load_transitions(StartPlaying)
df.load_transitions(StartWatching)
#knowsLeague transitions
df.load_transitions(favoriteTeam)
df.load_transitions(favoriteRegion)
df.load_transitions(casual)
df.load_transitions(edg)
df.load_transitions(keria)
df.load_transitions(advanced)
#global transition
df.load_global_nlu(globalState)
#babel
df.load_transitions(babel)
#macros
df.add_macros(macros)
if __name__ == '__main__':
openai.api_key_path = Macros.OPENAI_API_KEY_PATH
load(df, 'resources/visits.pkl')
| [] |
2024-01-10 | JacobxChoi/ChatBot | Macros.py | import json
import random
import re
from typing import Dict, Any, List, Callable, Pattern
from json import JSONDecodeError
import spacy
from emora_stdm import Macro, Ngrams
import openai
import regexutils
OPENAI_API_KEY_PATH = 'resources/openai_api.txt'
CHATGPT_MODEL = 'gpt-3.5-turbo'
class MacroGetName(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
#TODO: CHANGE THIS TO CHATGPT BASED
# r = re.compile(
# r"(?:(?:hi)?(?:\,)?(?:\s)*my(?:\s)*(?:name|nickname)(?:\s)*(?:is)?(?:\s)*|(?:hi)?(?:\,)?(?:\s)*i(?:\s)*am(?:\s)*|(?:please|you(?:\s)*can|everyone)?(?:\s)*(?:call|calls)(?:\s)*me(?:\s)*?|(?:hi)?(?:\,)?(?:\s)*i(?:\')?(?:m)?(?:ts)?(?:t\'s)?(?:\s)*(?:go by)?)?(?:\s)*(mr|mrs|ms|dr|dr\.)?(?:^|\s)*([a-z']+)(?:\s([a-z']+))?(?:(?:\,)?(?:\s)*.*)?")
#
# title, firstname, lastname = None, None, None
#
# for m in re.finditer(r, ngrams.text()):
# if m.group(1) is not None:
# title = m.group(1)
# if m.group(2) is not None:
# firstname = m.group(2)
# if m.group(3) is not None:
# lastname = m.group(3)
#
# if title is None and firstname is None and lastname is None:
# return False
#
# vars['TITLE'] = title
# vars['LASTNAME'] = lastname
vn_FN = 'FIRSTNAME'
vn_PI = 'PLAYERINFO'
        # FIRSTNAME is set upstream by GET_NAME_GPT; bail out if it is somehow missing
        if vn_FN not in vars:
            return False
        firstname = vars[vn_FN]
vn_firstname = firstname.capitalize()
#if 'FIRSTNAME' var isn't in vars
# if vn_FN not in vars:
# vars[vn_FN] = firstname
# vars[vn_firstname] = False
if vn_PI not in vars:
vars[vn_PI] = {}
vars[vn_PI][vn_firstname] = {}
vars[vn_firstname] = False
return True
#if vn_firstname (their actual name) isn't in vars['FIRSTNAME']
# if vn_firstname not in vars['FIRSTNAME']:
if vn_firstname not in vars[vn_PI]:
vars['FIRSTNAME'] = vn_firstname
vars[vn_PI][vn_firstname] = {}
vars[vn_firstname] = False
else:
vars[vn_firstname] = True
# add dictionary to gather info about user
return True
class MacroGetOldName(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
vn = vars['FIRSTNAME']
return vars[vn]
class MacroGetNewName(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
vn = vars['FIRSTNAME']
return not vars[vn]
class MacroPushName(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
return vars['FIRSTNAME'] + '.'
class GetPlayerActivity(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
role = 'ROLE'
if 'NA' in vars[role] or 'enjoyment not mentioned' in vars[role] or 'unknown' in vars[role] or 'unknown (no mention of interests)' in vars[role]:
return
else:
return 'That\'s really cool!'
class MacroEsportsOrLeague(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
r = re.compile(r"(dont.*play(?:ing)*)|([lL]eague(?:\s)*(?:[oO]f [lL]egend(?:s)?)?)?")
# m = r.search(ngrams.text())
hasLeague = False
for m in re.finditer(r, ngrams.text()):
if m.group(1) or m.group(2) is not None:
hasLeague = True
return hasLeague
class UserInfo(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
# variables used in conversation so far.
        # TODO We don't need to initialize the variables here, but it might be good to keep track of which variables we use
visits = 'VISITS'
player = 'FAV_PLAYER'
playerRec = 'PLAYER_RECOMMEND'
champ = 'FAV_CHAMP'
vars[champ] = ''
vars[player] = ''
vars[playerRec] = ''
vars[visits] = 1
class favRegion(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
userResponse = ngrams.text()
# opens json
f = open('resources/tourneys.json', )
data = json.load(f)
# gets user input
mystr = ngrams.text().split()
# different ways a user can reference the regions
regions = {'na': 'NORTH AMERICA',
'north': 'NORTH AMERICA',
'america': 'NORTH AMERICA',
'north american': 'NORTH AMERICA',
'lcs': 'NORTH AMERICA',
'nacl': 'NORTH AMERICA',
'kr': 'KOREA',
'korea': 'KOREA',
'lck': 'KOREA',
'emea': 'EMEA',
'lec': 'EMEA',
'tcl': 'EMEA',
'latam': 'LATIN AMERICA',
'lla': 'LATIN AMERICA',
'hong kong': 'HONG KONG, MACAU, TAIWAN',
'macau': 'HONG KONG, MACAU, TAIWAN',
'taiwan': 'HONG KONG, MACAU, TAIWAN',
'pcs': 'HONG KONG, MACAU, TAIWAN',
'cis': 'COMMONWEALTH OF INDEPENDENT STATES',
'lcl': 'COMMONWEALTH OF INDEPENDENT STATES',
'tr': 'TURKEY',
'turkey': 'TURKEY',
'vt': 'VIETNAM',
'vcs': 'VIETNAM',
'vietnam': 'VIETNAM',
'oc': 'OCEANIA',
'oceania': 'OCEANIA',
'lco': 'OCEANIA',
'international': 'INTERNATIONAL',
'br': 'BRAZIL',
'brazil': 'BRAZIL',
'cblol': 'BRAZIL',
'cn': 'CHINA',
'china': 'CHINA',
'lpl': 'CHINA',
'jp': 'JAPAN',
                   'japan': 'JAPAN',
'ljl': 'JAPAN'
}
# labeled T because they're temporary variables that are not meant to be stored.
t_tourney = 'T_TOURNEY'
t_typeOfMatch = 'T_MATCH'
team1 = 'T_TEAM1'
team2 = 'T_TEAM2'
winner = 'T_WINNER'
loser = 'T_LOSER'
t_date = 'T_DATE'
winner_code = 'WINNER_CODE'
loser_code = 'LOSER_CODE'
# t_month = 'T_MONTH'
# t_day = 'T_DAY'
vars[t_tourney] = ''
vars[t_typeOfMatch] = ''
vars[team1] = ''
vars[team2] = ''
vars[winner] = ''
vars[winner_code] = ''
vars[loser_code] = ''
# vars[t_day] = ''
# vars[t_month] = ''
# region local variable
region = ''
# sees if nouns match region dictionary and retrieves region
for word in mystr:
if word.lower() in regions:
region = regions[word.lower()]
# no region found. Return false
if region == '':
return False
else: #ADDS FAVORITE REGION TO PLAYERINFO
vn_PI = 'PLAYERINFO'
vn_FN = 'FIRSTNAME'
vn_FR = 'FAV_REGION'
vars[vn_PI][vars[vn_FN]][vn_FR] = region
# some regions don't have any games from this year so far. If this is the case, return false
if (len(data['ontology'][region]) >= 1):
#picks random tourney from region
tourney = data['ontology'][region]
tourney = tourney[random.randrange(len(tourney))]
hasTourney = True
noTourney = 0
#if a regional tourney is empty, select another tourney
while(hasTourney):
noTourney +=1
if tourney not in data['ontology']:
tourney = data['ontology'][region]
tourney = tourney[random.randrange(len(tourney))]
else:
hasTourney = False
if noTourney >= 17:
print("NO GAMES IN TOURNEY")
return False
#stores tourney into vars
vars[t_tourney] = tourney.replace('_', ' ')
else:
return False
# pulling game info from ontology. Last index -1 means most recent game. LOLA should remember which game was suggested
game = data['ontology'][tourney]
game = data['ontology'][tourney][random.randrange(len(game))]
#storing suggested game to personal info
vn_GS = 'GAME_SUGGESTED'
#if user already has games suggested to them
if vn_GS in vars[vn_PI][vars[vn_FN]]:
vars[vn_PI][vars[vn_FN]][vn_GS].append(game)
#user has not yet had games suggested to them
else:
vars[vn_PI][vars[vn_FN]][vn_GS] = []
vars[vn_PI][vars[vn_FN]][vn_GS].append(game)
#update variables to get random game
typeOfMatch = game['week']
vars[team1] = game['teams'][0]
vars[team2] = game['teams'][1]
vars[winner] = game['winner']
date = game['time'][0:10]
month = date[5:7]
day = date[-2:]
year = date[0:4]
# adds date to vars
vars[t_date] = month + '/' + day + '/' + year
# print(vars)
#gets winners and loser
if vars[winner] == game['teams'][1]:
vars[loser] = game['teams'][0]
vars[loser_code] = game['teamCodes'][0]
vars[winner_code] = game['teamCodes'][1]
else:
vars[loser] = game['teams'][1]
vars[loser_code] = game['teamCodes'][1]
vars[winner_code] = game['teamCodes'][0]
# playoffs
if typeOfMatch[0:8] == 'Playoffs':
vars[t_typeOfMatch] = typeOfMatch[-7:].lower() + " " + typeOfMatch[0:8].lower()
# knockout or weekly games
else:
vars[t_typeOfMatch] = typeOfMatch.lower()
# change numerical month to month name
# if month == '01':
# vars[t_month] = 'January'
# elif month == '02':
# vars[t_month] = 'February'
# elif month == '03':
# vars[t_month] = 'March'
# elif month == '04':
# vars[t_month] = 'April'
#
# # rd, st, th for days
# if day[-1:] == '2' or day[-1:] == '3':
# vars[t_day] = day + "rd"
# elif day[-1:] == 1:
# vars[t_day] = day + "st"
# else:
# vars[t_day] = day + "th"
return True
class getRandomGame(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
print(ngrams)
if ngrams == None or 'yes' in ngrams or 'yeah' in ngrams:
f = open('resources/tourneys.json', )
data = json.load(f)
#vars for game
t_tourney = 'T_TOURNEY'
t_typeOfMatch = 'T_MATCH'
team1 = 'T_TEAM1'
team2 = 'T_TEAM2'
winner = 'T_WINNER'
loser = 'T_LOSER'
t_month = 'T_MONTH'
t_day = 'T_DAY'
#vars for playerinfo
vn_PI = 'PLAYERINFO'
vn_FN = 'FIRSTNAME'
vn_FR = 'FAV_REGION'
region = vars[vn_PI][vars[vn_FN]][vn_FR]
if (len(data['ontology'][region]) >= 1):
# picks random tourney from region
tourney = data['ontology'][region]
tourney = tourney[random.randrange(len(tourney))]
hasTourney = True
noTourney = 0
# if a regional tourney is empty, select another tourney
while (hasTourney):
noTourney += 1
if tourney not in data['ontology']:
tourney = data['ontology'][region]
tourney = tourney[random.randrange(len(tourney))]
else:
hasTourney = False
# TODO: handle case where there are tourneys, but no games
if noTourney >= 17:
print("NO GAMES IN TOURNEY")
return False
# stores tourney into vars
vars[t_tourney] = tourney.replace('_', ' ')
else:
print("REGION HAS NO GAMES")
return False
# pulling game info from ontology. Last index -1 means most recent game. LOLA should remember which game was suggested
game = data['ontology'][tourney]
game = data['ontology'][tourney][random.randrange(len(game))]
# storing suggested game to personal info
vn_GS = 'GAME_SUGGESTED'
# if user already has games suggested to them
if vn_GS in vars[vn_PI][vars[vn_FN]]:
vars[vn_PI][vars[vn_FN]][vn_GS].append(game)
# user has not yet had games suggested to them
else:
vars[vn_PI][vars[vn_FN]][vn_GS] = []
vars[vn_PI][vars[vn_FN]][vn_GS].append(game)
# update variables to get random game
typeOfMatch = game['week']
vars[team1] = game['teams'][0]
vars[team2] = game['teams'][1]
vars[winner] = game['winner']
date = game['time'][0:10]
month = date[5:7]
day = date[-2:]
# gets winners and loser
if vars[winner] == game['teams'][1]:
vars[loser] = game['teams'][0]
else:
vars[loser] = game['teams'][1]
# playoffs
if typeOfMatch[0:8] == 'Playoffs':
vars[t_typeOfMatch] = typeOfMatch[-7:].lower() + " " + typeOfMatch[0:8].lower()
# knockout or weekly games
else:
vars[t_typeOfMatch] = typeOfMatch.lower()
# change numerical month to month name
if month == '01':
vars[t_month] = 'January'
elif month == '02':
vars[t_month] = 'February'
elif month == '03':
vars[t_month] = 'March'
elif month == '04':
vars[t_month] = 'April'
# rd, st, th for days
if day[-1:] == '2' or day[-1:] == '3':
vars[t_day] = day + "rd"
elif day[-1:] == 1:
vars[t_day] = day + "st"
else:
vars[t_day] = day + "th"
return True
else:
return False
class UserInputChampion(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
# variables
playerRec = 'PLAYER_RECOMMEND'
fav_champ = 'FAV_CHAMP'
vn_PI = 'PLAYERINFO'
vn_FN = 'FIRSTNAME'
# opening jason
f = open('resources/champs.json', )
data = json.load(f)
mystr = ngrams.text()
# takes user input as string
champs = {
'k\'sante':'ksante',
'cho gath':'chogath',
'cho\'gath':'chogath',
'lee sin': 'leesin',
'jarvan':'jarvaniv',
'jarvan iv':'jarvaniv',
'dr mundo':'drmundo',
'dr. mundo': 'drmundo',
'tahm kench':'tahmkench',
'xin zhao':'xinzhao',
'bel\'veth':'belveth',
'bel veth':'belveth',
'kha zix':'khazix',
'kha\'zix': 'khazix',
'master yi':'masteryi',
'rek\'sai':'reksai',
'rek sai': 'reksai',
'le\'blanc':'leblanc',
'le blanc': 'leblanc',
'aurelion sol':'aurelionsol',
'vel\'koz':'velkoz',
'vel koz': 'velkoz',
'twisted fate':'twistedfate',
'kog maw':'kogmaw',
'kog\'maw': 'kogmaw',
'miss fortune':'missfortune'
}
for key in champs:
if key in mystr.lower():
#user's favorite champion stored as temp variable
vars[fav_champ] = champs[key].capitalize()
#if user already has said their favorite champion
if fav_champ in vars[vn_PI][vars[vn_FN]]:
pass
#user has not yet said their favorite champion
else:
vars[vn_PI][vars[vn_FN]][fav_champ] = champs[key].capitalize()
# grabs player that plays this champion
player = data['ontology'][champs[key]][random.randrange(len(data['ontology'][champs[key]]))]
            #playerRec has already been made
if playerRec in vars[vn_PI][vars[vn_FN]]:
# if player has already been suggested previously
if player in vars[vn_PI][vars[vn_FN]][playerRec]:
diffPlayer = data['ontology'][champs[key]][random.randrange(len(data['ontology'][champs[key]]))]
vars[vn_PI][vars[vn_FN]][playerRec].append(diffPlayer)
vars[playerRec] = diffPlayer
return True
#player has not yet been suggested
else:
vars[vn_PI][vars[vn_FN]][playerRec].append(player)
vars[playerRec] = player
return True
#player has not yet been suggested
else:
vars[vn_PI][vars[vn_FN]][playerRec] = []
newPlayer = data['ontology'][champs[key]][random.randrange(len(data['ontology'][champs[key]]))]
vars[vn_PI][vars[vn_FN]][playerRec].append(newPlayer)
vars[playerRec] = newPlayer
return True
mystr = ngrams.text().split()
#iterates through player text
for word in mystr:
#if champion in ontology
if word.lower() in data['ontology']:
# user's favorite champion stored as temp variable
vars[fav_champ] = word.capitalize()
# if user already has said their favorite champion
if fav_champ in vars[vn_PI][vars[vn_FN]]:
pass
# user has not yet said their favorite champion
else:
vars[vn_PI][vars[vn_FN]][fav_champ] = word.capitalize()
# grabs player that plays this champion
player = data['ontology'][word.lower()][random.randrange(len(data['ontology'][word.lower()]))]
            # playerRec has already been made
if playerRec in vars[vn_PI][vars[vn_FN]]:
# if player has already been suggested previously
if player in vars[vn_PI][vars[vn_FN]][playerRec]:
diffPlayer = data['ontology'][word.lower()][random.randrange(len(data['ontology'][word.lower()]))]
vars[vn_PI][vars[vn_FN]][playerRec].append(diffPlayer)
vars[playerRec] = diffPlayer
return True
# player has not yet been suggested
else:
vars[vn_PI][vars[vn_FN]][playerRec].append(player)
vars[playerRec] = player
return True
# player has not yet been suggested
else:
vars[vn_PI][vars[vn_FN]][playerRec] = []
newPlayer = data['ontology'][word.lower()][random.randrange(len(data['ontology'][word.lower()]))]
vars[vn_PI][vars[vn_FN]][playerRec].append(newPlayer)
vars[playerRec] = newPlayer
return True
return False
class MacroGPTJSON(Macro):
def __init__(self, request: str, full_ex: Dict[str, Any], empty_ex: Dict[str, Any] = None,
set_variables: Callable[[Dict[str, Any], Dict[str, Any]], None] = None) -> object:
"""
:rtype: object
:param request: the task to be requested regarding the user input (e.g., How does the speaker want to be called?).
:param full_ex: the example output where all values are filled (e.g., {"call_names": ["Mike", "Michael"]}).
:param empty_ex: the example output where all collections are empty (e.g., {"call_names": []}).
:param set_variables: it is a function that takes the STDM variable dictionary and the JSON output dictionary and sets necessary variables.
"""
self.request = request
self.full_ex = json.dumps(full_ex)
self.empty_ex = '' if empty_ex is None else json.dumps(empty_ex)
self.check = re.compile(regexutils.generate(full_ex))
self.set_variables = set_variables
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
examples = f'{self.full_ex} or {self.empty_ex} if unavailable' if self.empty_ex else self.full_ex
prompt = f'{self.request} Respond in the JSON schema such as {examples}: {ngrams.raw_text().strip()}'
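        # Ask the model to reply strictly in the example JSON schema, then parse the reply into vars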
output = gpt_completion(prompt)
# print(output)
if not output: return False
try:
d = json.loads(output)
except JSONDecodeError:
return False
if self.set_variables:
self.set_variables(vars, d)
else:
vars.update(d)
return True
class MacroNLG(Macro):
def __init__(self, generate: Callable[[Dict[str, Any]], str]):
self.generate = generate
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
return self.generate(vars)
def gpt_completion(input: str, regex: Pattern = None) -> str:
response = openai.ChatCompletion.create(
model=CHATGPT_MODEL,
messages=[{'role': 'user', 'content': input}]
)
output = response['choices'][0]['message']['content'].strip()
if regex is not None:
m = regex.search(output)
output = m.group().strip() if m else None
return output
#Section: casual communication: What's your favorite game?
def getFavGame(vars: Dict[str, Any]):
return vars['GameType']
def getReason(vars: Dict[str, Any]):
return vars['WhyInterest']
def getActivityWithFriends(vars: Dict[str, Any]):
return vars['WithFriendActivities']
def getSportsEvent(vars: Dict[str, Any]):
return vars['SportEvent']
def PositiveAgreement(vars: Dict[str, Any]):
if vars['Agreement'] == 'yes':
return True
else:
return False
def NegativeAgreement(vars: Dict[str, Any]):
if vars['Agreement'] == 'no':
return True
else:
return False
class MacroGPTHAIKU(Macro):
def __init__(self, request: str, full_ex: Dict[str, Any], empty_ex: Dict[str, Any] = None,
set_variables: Callable[[Dict[str, Any], Dict[str, Any]], None] = None) -> object:
"""
:rtype: object
:param request: the task to be requested regarding the user input (e.g., How does the speaker want to be called?).
:param full_ex: the example output where all values are filled (e.g., {"call_names": ["Mike", "Michael"]}).
:param empty_ex: the example output where all collections are empty (e.g., {"call_names": []}).
:param set_variables: it is a function that takes the STDM variable dictionary and the JSON output dictionary and sets necessary variables.
"""
self.request = request
self.full_ex = json.dumps(full_ex)
self.empty_ex = '' if empty_ex is None else json.dumps(empty_ex)
self.check = re.compile(regexutils.generate(full_ex))
self.set_variables = set_variables
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
examples = f'{self.full_ex} or {self.empty_ex} if unavailable' if self.empty_ex else self.full_ex
prompt = f'{self.request} Respond in the JSON schema such as {examples}'
output = gpt_completion(prompt)
# print(output)
if not output: return False
try:
d = json.loads(output)
except JSONDecodeError:
return False
if self.set_variables:
self.set_variables(vars, d)
else:
vars.update(d)
return True
# This macro use analogy to explain the game goal according to the favorite game user select
class MacroGoalAnalogy(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
vn = 'GameType'
GameTypeResponseDic = {
'RPG': {
'GameGoalSim': 'You can conceive it as a 30 min Mini role play game with real person friends and foes, \n where each hero has different sets of power to achieve the ultimate goal: destroy the enemy nexus with your teammates. \n Along the process, side missions are seizing the available resources, ambushing the enemy champions, and getting buffs \n from neutral monsters, upgrading the weapons and defense... '
},
'shooter': {
'GameGoalSim': 'It\'s really similar to the shooter games like CS or Overwatch, as teammates cooperate \n in a competitive environment to achieve the ultimate goal. In here, it\'s destroying the turrets and finally the enemy nexus '
},
'towerDefense': {
'GameGoalSim': 'You can regard it as the tower defense game where you shall protect your turrets and bases from enemy attack. (ゝ∀・).'
},
'other': {
'GameGoalSim': 'The main goal of league of legends is to destroy the other team\'s base. And, of course, there are many obstacles on the way to final goal and side missions.'
}
}
if vn not in vars:
vars[vn] = 'Role play Game'
if vars[vn] == 'First-person shooter ':
return GameTypeResponseDic['shooter']['GameGoalSim']
if vars[vn] == 'Tower defense':
return GameTypeResponseDic['towerDefense']['GameGoalSim']
if vars[vn] == 'Role play Game':
return GameTypeResponseDic['RPG']['GameGoalSim']
else:
return GameTypeResponseDic['RPG']['GameGoalSim']
class MacroEsportAttitudeResponse(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
vn = 'EsportAttitude'
EsportAttitudeResponseDic = {
'LookingForward': {
'EsportAttitudeSim': 'It\'s definitely worth a try if you want to meet more friends with same interest !'
},
'Excitement': {
'EsportAttitudeSim': 'ヽ(゜▽゜ )-C<(/;◇;)/~,'
},
'Unwilling': {
'EsportAttitudeSim': '(。í _ ì。), '
},
'Open-mindedness': {
'EsportAttitudeSim': 'I agree with you. Embracing new things is definitely a joy in the life ! '
},
'Indifference':{
'EsportAttitudeSim': 'That\'s fine. After all, one fun thing to be a person is each of us has different interest. '
},
'other': {
'EsportAttitudeSim': '(´~`),'
}
}
if vars[vn] == 'LookingForward':
return EsportAttitudeResponseDic['LookingForward']['EsportAttitudeSim']
if vars[vn] == 'Excitement':
return EsportAttitudeResponseDic['Excitement']['EsportAttitudeSim']
if vars[vn] == 'Unwilling':
return EsportAttitudeResponseDic['Unwilling']['EsportAttitudeSim']
if vars[vn] == 'Open-mindedness':
return EsportAttitudeResponseDic['Open-mindedness']['EsportAttitudeSim']
else:
return EsportAttitudeResponseDic['other']['EsportAttitudeSim']
class MacroFunTripError(Macro):
def run(self, ngrams: Ngrams, vars: Dict[str, Any], args: List[Any]):
n = random.randint(0, 4)
ErrorDic = {
1: {"error": 'I\'m sorry, we have to stay quiet around the fierce creatures, what do you want to know, the origin of the creature or power it beholds, \nor you want to continue the journey to behold more mysterious creatures'
},
2: {"error": 'Don\'t move further, it notice us. Let us first moves further from those irrational monsters. By the way, do you have other questions \n or you just want to move away from the burning place'
},
3: {"error": 'My apologies, my attentions are completely drawn away from those adorable monsters. Do you see their scales glittering under the sunshine. \n That\'s definitely breathetaking. By the way, do you have questions over the origin of those creatures. \n If you want to move to another place, just let me know'
},
4: {"error": 'The wind is too loud there at rift, I didn\t hear you much just then. Could you repeat your questions or you want to continue our trip to visit other monsters'
},
5: {"error": 'We seemed to rushed into its territory. Let\'s first get rid of there... What questions did you have or you just want to leave'
},
}
match n:
case 0:
return ErrorDic[1]['error']
case 1:
return ErrorDic[2]['error']
case 2:
return ErrorDic[3]['error']
case 3:
return ErrorDic[4]['error']
case 4:
return ErrorDic[5]['error']
return True
def getChampionRecommendReason(vars: Dict[str, Any]):
print (vars['RecommendedChampion'])
return vars['ChampionRecommendReason']
| [
"INPUT"
] |
2024-01-10 | jlondonobo/llmovies | utils~prepare_data.py | import numpy as np
import pandas as pd
from dotenv import load_dotenv
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from langchain.vectorstores import Weaviate
from tqdm import tqdm
from weaviate_client import client
tqdm.pandas(desc="Processing embeddings")
def download_imdb_ratings() -> pd.DataFrame:
url = "https://datasets.imdbws.com/title.ratings.tsv.gz"
return pd.read_csv(url, sep="\t")
def add_imdb_ratings(movies: pd.DataFrame, imdb_ratings: pd.DataFrame) -> pd.DataFrame:
# Some movies have no IMDb id (None), so we need m:1
imdb_formatted = imdb_ratings.rename(
columns={
"tconst": "imdb_id",
"averageRating": "imdb_vote_average",
"numVotes": "imdb_vote_count",
}
)
merged = movies.merge(
imdb_formatted,
on="imdb_id",
how="left",
validate="m:1",
)
return merged
def read_movies(source: str) -> pd.DataFrame:
res = pd.read_parquet(source)
return res.assign(
providers=lambda df: df["providers"].apply(np.ndarray.tolist),
genres_list=lambda df: df["genres_list"].str.split(", "),
release_year=lambda df: pd.to_datetime(df["release_date"]).dt.year,
)
def parse_null_float(val: float) -> float | None:
if np.isnan(val):
return None
return val
def parse_null_int(val: int) -> int | None:
if np.isnan(val):
return None
return int(val)
def create_documents(data: pd.DataFrame) -> list[Document]:
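    # Each movie overview becomes a Document, with ratings, providers, and links carried as metadata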
docs = []
for _, row in data.iterrows():
properties = {
"show_id": row["id"],
"title": row["title"],
"release_year": parse_null_int(row["release_year"]),
"genres": row["genres_list"],
"trailer_url": row["trailer"],
"watch": row["provider_url"],
"providers": row["providers"],
"vote_average": parse_null_float(row["vote_average"]),
"vote_count": row["vote_count"],
"imdb_vote_average": parse_null_float(row["imdb_vote_average"]),
"imdb_vote_count": parse_null_int(row["imdb_vote_count"]),
"runtime": row["runtime"],
}
doc = Document(page_content=row["overview"], metadata=properties)
docs.append(doc)
return docs
def main():
load_dotenv()
DATA_SOURCE = "data/final_movies.parquet"
movies = read_movies(DATA_SOURCE)
imdb_ratings = download_imdb_ratings()
    movies_with_imdb_ratings = add_imdb_ratings(movies, imdb_ratings)
    docs = create_documents(movies_with_imdb_ratings)
embeddings = OpenAIEmbeddings()
Weaviate.from_documents(
docs,
embeddings,
index_name="Movie",
client=client,
text_key="overview",
)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | jlondonobo/llmovies | utils~input.py | from langchain.chains.query_constructor.base import AttributeInfo
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema import Document
from langchain.vectorstores import Weaviate
from utils.weaviate_client import client
CATEGORIES = [
"Action",
"Documentary",
"Family",
"Drama",
"Horror",
"Fantasy",
"Adventure",
"History",
"Romance",
"Music",
"Western",
"Animation",
"War",
"Comedy",
"Mystery",
"TV Movie",
"Thriller",
"Science Fiction",
"Crime",
]
METADATA_FIELD_INFO = [
AttributeInfo(
name="genres",
description="The genres of the movie. Must be one of the following: {', '.join(CATEGORIES)}}",
type="string or list[string]",
),
AttributeInfo(
name="release_year",
description="The year the movie was released",
type="float",
),
AttributeInfo(
name="imdb_vote_average",
description="A 1-10 rating for the movie",
type="float",
),
AttributeInfo(
name="imdb_vote_count",
description="The number of reviews the movie has on IMDB",
type="float",
),
]
def get_best_docs(input: str, providers: list[int]) -> list[Document]:
document_content_description = "Brief summary of a movie"
llm = OpenAI(temperature=0)
vectorstore = Weaviate(
client,
"Movie",
"text",
attributes=[
"title",
"release_year",
"runtime",
"genres",
"imdb_vote_count",
"imdb_vote_average",
"trailer_url",
"watch",
],
)
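    # Only return movies available on at least one of the user's selected streaming providers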
where_filter = {
"path": ["providers"],
"operator": "ContainsAny",
"valueNumber": [int(p) for p in providers],
}
retriever = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
METADATA_FIELD_INFO,
verbose=True,
search_kwargs={"k": 3, "where_filter": where_filter},
)
return retriever.get_relevant_documents(input)
| [] |
2024-01-10 | ginoabraham/azure-open-ai-embeddings-qna | code~utilities~helper.py | import os
import openai
from dotenv import load_dotenv
import logging
import re
import hashlib
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.vectorstores.base import VectorStore
from langchain.chains import ChatVectorDBChain
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.llm import LLMChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.prompts import PromptTemplate
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import TokenTextSplitter, TextSplitter
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import TextLoader
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from utilities.formrecognizer import AzureFormRecognizerClient
from utilities.azureblobstorage import AzureBlobStorageClient
from utilities.translator import AzureTranslatorClient
from utilities.customprompt import PROMPT
from utilities.redis import RedisExtended
import pandas as pd
import urllib
from fake_useragent import UserAgent
class LLMHelper:
def __init__(self,
document_loaders : BaseLoader = None,
text_splitter: TextSplitter = None,
embeddings: OpenAIEmbeddings = None,
llm: AzureOpenAI = None,
temperature: float = None,
max_tokens: int = None,
custom_prompt: str = "",
vector_store: VectorStore = None,
k: int = None,
pdf_parser: AzureFormRecognizerClient = None,
blob_client: AzureBlobStorageClient = None,
enable_translation: bool = False,
translator: AzureTranslatorClient = None):
load_dotenv()
openai.api_type = "azure"
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Azure OpenAI settings
self.api_base = openai.api_base
self.api_version = openai.api_version
self.index_name: str = "embeddings"
self.model: str = os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', "text-embedding-ada-002")
self.deployment_name: str = os.getenv("OPENAI_ENGINE", os.getenv("OPENAI_ENGINES", "text-davinci-003"))
self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text")
self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.7)) if temperature is None else temperature
self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens
self.prompt = PROMPT if custom_prompt == '' else PromptTemplate(template=custom_prompt, input_variables=["summaries", "question"])
# Vector store settings
self.vector_store_address: str = os.getenv('REDIS_ADDRESS', "localhost")
self.vector_store_port: int= int(os.getenv('REDIS_PORT', 6379))
self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://")
self.vector_store_password: str = os.getenv("REDIS_PASSWORD", None)
if self.vector_store_password:
self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}"
else:
self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_address}:{self.vector_store_port}"
self.chunk_size = int(os.getenv('CHUNK_SIZE', 500))
self.chunk_overlap = int(os.getenv('CHUNK_OVERLAP', 100))
self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders
self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter
self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model, chunk_size=1) if embeddings is None else embeddings
if self.deployment_type == "Chat":
self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm
else:
self.llm: AzureOpenAI = AzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm
self.vector_store: RedisExtended = RedisExtended(redis_url=self.vector_store_full_address, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
self.k : int = 3 if k is None else k
self.pdf_parser : AzureFormRecognizerClient = AzureFormRecognizerClient() if pdf_parser is None else pdf_parser
self.blob_client: AzureBlobStorageClient = AzureBlobStorageClient() if blob_client is None else blob_client
self.enable_translation : bool = False if enable_translation is None else enable_translation
self.translator : AzureTranslatorClient = AzureTranslatorClient() if translator is None else translator
        self.user_agent: UserAgent = UserAgent()
self.user_agent.random
def add_embeddings_lc(self, source_url):
try:
documents = self.document_loaders(source_url).load()
# Convert to UTF-8 encoding for non-ascii text
            for document in documents:
try:
if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"):
document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8", errors="ignore")
except:
pass
docs = self.text_splitter.split_documents(documents)
# Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]')
            for doc in docs:
doc.page_content = re.sub(pattern, '', doc.page_content)
keys = []
for i, doc in enumerate(docs):
# Create a unique key for the document
source_url = source_url.split('?')[0]
filename = "/".join(source_url.split('/')[4:])
hash_key = hashlib.sha1(f"{source_url}_{i}".encode('utf-8')).hexdigest()
hash_key = f"doc:{self.index_name}:{hash_key}"
keys.append(hash_key)
doc.metadata = {"source": f"[{source_url}]({source_url}_SAS_TOKEN_PLACEHOLDER_)" , "chunk": i, "key": hash_key, "filename": filename}
self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address, index_name=self.index_name, keys=keys)
except Exception as e:
logging.error(f"Error adding embeddings for {source_url}: {e}")
raise e
def convert_file_and_add_embeddings(self, source_url, filename, enable_translation=False):
# Extract the text from the file
text = self.pdf_parser.analyze_read(source_url)
# Translate if requested
text = list(map(lambda x: self.translator.translate(x), text)) if self.enable_translation else text
# Upload the text to Azure Blob Storage
converted_filename = f"converted/{filename}.txt"
source_url = self.blob_client.upload_file("\n".join(text), f"converted/{filename}.txt", content_type='text/plain; charset=utf-8')
print(f"Converted file uploaded to {source_url} with filename {filename}")
# Update the metadata to indicate that the file has been converted
self.blob_client.upsert_blob_metadata(filename, {"converted": "true"})
self.add_embeddings_lc(source_url=source_url)
return converted_filename
def get_all_documents(self, k: int = None):
result = self.vector_store.similarity_search(query="*", k= k if k else self.k)
return pd.DataFrame(list(map(lambda x: {
'key': x.metadata['key'],
'filename': x.metadata['filename'],
'source': urllib.parse.unquote(x.metadata['source']),
'content': x.page_content,
'metadata' : x.metadata,
}, result)))
def get_semantic_answer_lang_chain(self, question, chat_history):
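        # Condense the follow-up question using chat history, then answer over retrieved chunks and attach sources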
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=False)
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=True, prompt=self.prompt)
chain = ConversationalRetrievalChain(
retriever=self.vector_store.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True,
# top_k_docs_for_context= self.k
)
result = chain({"question": question, "chat_history": chat_history})
context = "\n".join(list(map(lambda x: x.page_content, result['source_documents'])))
sources = "\n".join(set(map(lambda x: x.metadata["source"], result['source_documents'])))
container_sas = self.blob_client.get_container_sas()
result['answer'] = result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0]
sources = sources.replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)
return question, result['answer'], context, sources
def get_embeddings_model(self):
OPENAI_EMBEDDINGS_ENGINE_DOC = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002'))
OPENAI_EMBEDDINGS_ENGINE_QUERY = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002'))
return {
"doc": OPENAI_EMBEDDINGS_ENGINE_DOC,
"query": OPENAI_EMBEDDINGS_ENGINE_QUERY
}
def get_completion(self, prompt, **kwargs):
if self.deployment_type == 'Chat':
return self.llm([HumanMessage(content=prompt)]).content
else:
return self.llm(prompt)
| [] |
2024-01-10 | lariar/AI-Executive-Order | load_documents.py | from langchain.document_loaders import PDFPlumberLoader
from langchain.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredXMLLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
openai_api_key = os.getenv('OPENAI_API_KEY')
promptlayer_api_key = os.getenv('PROMPTLAYER_API_KEY')
# Load documents from the 'docs' directory
documents = []
for file in os.listdir("docs"):
if file.endswith(".xml"):
xml_path = "./docs/" + file
loader = UnstructuredXMLLoader(xml_path)
documents.extend(loader.load())
if file.endswith(".pdf"):
pdf_path = "./docs/" + file
loader = PDFPlumberLoader(pdf_path)
documents.extend(loader.load())
elif file.endswith('.docx') or file.endswith('.doc'):
doc_path = "./docs/" + file
loader = Docx2txtLoader(doc_path)
documents.extend(loader.load())
elif file.endswith('.txt'):
text_path = "./docs/" + file
loader = TextLoader(text_path)
documents.extend(loader.load())
elif file.endswith('.csv'):
text_path = "./docs/" + file
loader = CSVLoader(text_path)
documents.extend(loader.load())
# Process and chunk up the text using CharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 1000,
chunk_overlap = 0
)
documents = text_splitter.split_documents(documents)
# Create the vectorstore using FAISS
vectordb = FAISS.from_documents(documents, embedding=OpenAIEmbeddings())
| [
"PROMPTLAYER_API_KEY"
] |