import os

import openai

os.environ["TOKENIZERS_PARALLELISM"] = "false"
# The openai client reads OPENAI_API_KEY from the environment; this bare
# lookup just fails fast with a KeyError if the secret is missing.
os.environ["OPENAI_API_KEY"]
def save_file(input_file):
    import shutil
    import os
    destination_dir = "/home/user/app/file/"
    os.makedirs(destination_dir, exist_ok=True)
    for file in input_file:
        shutil.copy(file.name, destination_dir)
    return "File(s) saved successfully!"
def process_file():
    from langchain.document_loaders import PyPDFLoader
    from langchain.document_loaders import DirectoryLoader
    from langchain.document_loaders import TextLoader
    from langchain.document_loaders import Docx2txtLoader
    from langchain.vectorstores import FAISS
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    loader1 = DirectoryLoader('/home/user/app/file/', glob="./*.pdf", loader_cls=PyPDFLoader)
    documents = loader1.load()
    loader2 = DirectoryLoader('/home/user/app/file/', glob="./*.txt", loader_cls=TextLoader)
    documents.extend(loader2.load())
    loader3 = DirectoryLoader('/home/user/app/file/', glob="./*.docx", loader_cls=Docx2txtLoader)
    documents.extend(loader3.load())

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len)
    docs = text_splitter.split_documents(documents)

    embeddings = OpenAIEmbeddings()
    file_db = FAISS.from_documents(docs, embeddings)
    file_db.save_local("/home/user/app/file_db/")
    return "File(s) processed successfully!"
def formatted_response(docs, response):
    formatted_output = response + "\n\nSources:"
    for doc in docs:
        source_info = doc.metadata.get('source', 'Unknown source')
        page_info = doc.metadata.get('page', None)
        # Keep only the file name from the full source path.
        file_name = source_info.split('/')[-1].strip()
        if page_info is not None:
            formatted_output += f"\n{file_name}\tpage {page_info}"
        else:
            formatted_output += f"\n{file_name}"
    return formatted_output
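
# search_file answers a question from the uploaded documents: it retrieves the
# most similar chunks from the FAISS index and runs a "stuff" QA chain over
# them, printing token usage to the logs.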
def search_file(question):
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    from langchain.chains.question_answering import load_qa_chain
    from langchain.callbacks import get_openai_callback
    from langchain.chat_models import ChatOpenAI

    embeddings = OpenAIEmbeddings()
    file_db = FAISS.load_local("/home/user/app/file_db/", embeddings)
    docs = file_db.similarity_search(question)

    llm = ChatOpenAI(model_name='gpt-3.5-turbo')
    chain = load_qa_chain(llm, chain_type="stuff")
    with get_openai_callback() as cb:
        response = chain.run(input_documents=docs, question=question)
        print(cb)
    return formatted_response(docs, response)
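
# local_search works like search_file but queries the separate index at
# /home/user/app/local_db/ (not built by this script).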
def local_search(question):
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    from langchain.chains.question_answering import load_qa_chain
    from langchain.callbacks import get_openai_callback
    from langchain.chat_models import ChatOpenAI

    embeddings = OpenAIEmbeddings()
    file_db = FAISS.load_local("/home/user/app/local_db/", embeddings)
    docs = file_db.similarity_search(question)

    llm = ChatOpenAI(model_name='gpt-3.5-turbo')
    chain = load_qa_chain(llm, chain_type="stuff")
    with get_openai_callback() as cb:
        response = chain.run(input_documents=docs, question=question)
        print(cb)
    return formatted_response(docs, response)
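
# delete_file removes the uploaded files and their FAISS index so a fresh set
# of documents can be processed.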
def delete_file():
    import shutil
    path1 = "/home/user/app/file/"
    path2 = "/home/user/app/file_db/"
    try:
        shutil.rmtree(path1)
        shutil.rmtree(path2)
        return "Deleted Successfully"
    except FileNotFoundError:
        return "Already Deleted"
def soap_refresh():
    import os
    import gradio as gr
    directory = "/home/user/app/soap_docs/"
    os.makedirs(directory, exist_ok=True)
    file_list = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            file_list.append(file)
    return gr.Dropdown.update(choices=file_list)

def sbar_refresh():
    import os
    import gradio as gr
    directory = "/home/user/app/sbar_docs/"
    os.makedirs(directory, exist_ok=True)
    file_list = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            file_list.append(file)
    return gr.Dropdown.update(choices=file_list)
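
# ask_soap and ask_sbar answer a question about one saved report by pasting
# the report's full text into the prompt (no retrieval step; the report is
# assumed to fit within the model's context window).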
def ask_soap(doc_name, question):
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    import docx

    # Pull the raw text out of the selected SOAP report.
    docx_path = "/home/user/app/soap_docs/" + doc_name
    doc = docx.Document(docx_path)
    extracted_text = "Extracted text:\n\n\n"
    for paragraph in doc.paragraphs:
        extracted_text += paragraph.text + "\n"

    # Append the user's question so the model answers from the extracted text.
    extracted_text += (
        "\n\nUse the 'Extracted text' to answer the following question:\n" + question
    )
    print(extracted_text)

    template = """Question: {question}
Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    response = llm_chain.run(extracted_text)
    return response

def ask_sbar(doc_name, question):
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    import docx

    # Pull the raw text out of the selected SBAR report.
    docx_path = "/home/user/app/sbar_docs/" + doc_name
    doc = docx.Document(docx_path)
    extracted_text = "Extracted text:\n\n\n"
    for paragraph in doc.paragraphs:
        extracted_text += paragraph.text + "\n"

    # Append the user's question so the model answers from the extracted text.
    extracted_text += (
        "\n\nUse the 'Extracted text' to answer the following question:\n" + question
    )
    print(extracted_text)

    template = """Question: {question}
Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    response = llm_chain.run(extracted_text)
    return response
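
# search_gpt and local_gpt send the question directly to the chat model with
# no document context, as a plain-ChatGPT alternative to the index searches.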
def search_gpt(question):
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    template = """Question: {question}
Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    response = llm_chain.run(question)
    return response

def local_gpt(question):
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    template = """Question: {question}
Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])
    llm = ChatOpenAI(model_name="gpt-3.5-turbo")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    response = llm_chain.run(question)
    return response
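
# audio_text transcribes recorded or uploaded audio with OpenAI's Whisper API
# and caches the transcript in the module-level `output` for the report
# generators below.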
output = ""  # most recent transcript, shared across callbacks

def audio_text(filepath):
    import openai
    global output
    with open(filepath, "rb") as audio:
        transcript = openai.Audio.transcribe("whisper-1", audio)
    output = transcript["text"]
    return output
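
# Most recent generated reports, shared with the .docx save callbacks.
# transcript_soap and transcript_sbar build a report from transcript text
# passed in by the caller; the word count serves as a rough proxy for token
# count when choosing a model with a large enough context window.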
soap_response = ""  # most recent SOAP report
sbar_response = ""  # most recent SBAR report

def transcript_soap(text):
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    global soap_response
    question = (
        "Use the following context given below to generate a detailed SOAP Report:\n\n"
    )
    question += text
    print(question)
    template = """Question: {question}
Answer: Let's think step by step."""
    word_count = len(text.split())
    prompt = PromptTemplate(template=template, input_variables=["question"])
    if word_count < 2000:
        llm = ChatOpenAI(model="gpt-3.5-turbo")
    elif word_count < 5000:
        llm = ChatOpenAI(model="gpt-4")
    else:
        llm = ChatOpenAI(model="gpt-4-32k")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    soap_response = llm_chain.run(question)
    return soap_response

def transcript_sbar(text):
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    global sbar_response
    question = (
        "Use the following context given below to generate a detailed SBAR Report:\n\n"
    )
    question += text
    print(question)
    template = """Question: {question}
Answer: Let's think step by step."""
    word_count = len(text.split())
    prompt = PromptTemplate(template=template, input_variables=["question"])
    if word_count < 2000:
        llm = ChatOpenAI(model="gpt-3.5-turbo")
    elif word_count < 5000:
        llm = ChatOpenAI(model="gpt-4")
    else:
        llm = ChatOpenAI(model="gpt-4-32k")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    sbar_response = llm_chain.run(question)
    return sbar_response
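
# text_soap and text_sbar do the same, but read the transcript from the
# module-level `output` set by audio_text, so they can run after either
# recording or uploading audio.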
def text_soap():
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    global output
    global soap_response
    question = (
        "Use the following context given below to generate a detailed SOAP Report:\n\n"
    )
    question += output
    print(question)
    template = """Question: {question}
Answer: Let's think step by step."""
    word_count = len(output.split())
    prompt = PromptTemplate(template=template, input_variables=["question"])
    if word_count < 2000:
        llm = ChatOpenAI(model="gpt-3.5-turbo")
    elif word_count < 5000:
        llm = ChatOpenAI(model="gpt-4")
    else:
        llm = ChatOpenAI(model="gpt-4-32k")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    soap_response = llm_chain.run(question)
    return soap_response

def text_sbar():
    from langchain import PromptTemplate, LLMChain
    from langchain.chat_models import ChatOpenAI
    global output
    global sbar_response
    question = (
        "Use the following context given below to generate a detailed SBAR Report:\n\n"
    )
    question += output
    print(question)
    template = """Question: {question}
Answer: Let's think step by step."""
    word_count = len(output.split())
    prompt = PromptTemplate(template=template, input_variables=["question"])
    if word_count < 2000:
        llm = ChatOpenAI(model="gpt-3.5-turbo")
    elif word_count < 5000:
        llm = ChatOpenAI(model="gpt-4")
    else:
        llm = ChatOpenAI(model="gpt-4-32k")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    sbar_response = llm_chain.run(question)
    return sbar_response
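
# soap_docx and sbar_docx save the most recent report as a .docx file, which
# also makes it selectable in the query dropdowns after a refresh.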
def soap_docx(name):
    global soap_response
    import docx
    import os
    destination_folder = "/home/user/app/soap_docs/"
    os.makedirs(destination_folder, exist_ok=True)
    path = f"{destination_folder}SOAP_{name}.docx"
    doc = docx.Document()
    doc.add_paragraph(soap_response)
    doc.save(path)
    return "Successfully saved .docx File"

def sbar_docx(name):
    global sbar_response
    import docx
    import os
    destination_folder = "/home/user/app/sbar_docs/"
    os.makedirs(destination_folder, exist_ok=True)
    path = f"{destination_folder}SBAR_{name}.docx"
    doc = docx.Document()
    doc.add_paragraph(sbar_response)
    doc.save(path)
    return "Successfully saved .docx File"
import gradio as gr

css = """
.col {
    max-width: 50%;
    margin: 0 auto;
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("## <center>Medical App</center>")

    with gr.Tab("SOAP and SBAR Note Creation"):
        with gr.Column(elem_classes="col"):

            with gr.Tab("From Recorded Audio"):
                with gr.Column():
                    mic_audio_input = gr.Audio(type="filepath", label="Speak to the Microphone")
                    mic_audio_button = gr.Button("Generate Transcript")
                    mic_audio_output = gr.Textbox(label="Output")
                    mic_text_soap_button = gr.Button("Generate SOAP Report")
                    mic_text_soap_output = gr.Textbox(label="Output")
                    mic_text_sbar_button = gr.Button("Generate SBAR Report")
                    mic_text_sbar_output = gr.Textbox(label="Output")
                    mic_docx_input = gr.Textbox(label="Enter the name of .docx File")
                    mic_soap_docx_button = gr.Button("Save SOAP .docx File")
                    mic_soap_docx_output = gr.Textbox(label="Output")
                    mic_sbar_docx_button = gr.Button("Save SBAR .docx File")
                    mic_sbar_docx_output = gr.Textbox(label="Output")

            with gr.Tab("From Uploaded Audio"):
                with gr.Column():
                    upload_audio_input = gr.Audio(type="filepath", label="Upload Audio File here")
                    upload_audio_button = gr.Button("Generate Transcript")
                    upload_audio_output = gr.Textbox(label="Output")
                    upload_text_soap_button = gr.Button("Generate SOAP Report")
                    upload_text_soap_output = gr.Textbox(label="Output")
                    upload_text_sbar_button = gr.Button("Generate SBAR Report")
                    upload_text_sbar_output = gr.Textbox(label="Output")
                    upload_docx_input = gr.Textbox(label="Enter the name of .docx File")
                    upload_soap_docx_button = gr.Button("Save SOAP .docx File")
                    upload_soap_docx_output = gr.Textbox(label="Output")
                    upload_sbar_docx_button = gr.Button("Save SBAR .docx File")
                    upload_sbar_docx_output = gr.Textbox(label="Output")

            with gr.Tab("From Text Transcript"):
                with gr.Column():
                    text_transcript_input = gr.Textbox(label="Enter your Transcript here")
                    text_text_soap_button = gr.Button("Generate SOAP Report")
                    text_text_soap_output = gr.Textbox(label="Output")
                    text_text_sbar_button = gr.Button("Generate SBAR Report")
                    text_text_sbar_output = gr.Textbox(label="Output")
                    text_docx_input = gr.Textbox(label="Enter the name of .docx File")
                    text_soap_docx_button = gr.Button("Save SOAP .docx File")
                    text_soap_docx_output = gr.Textbox(label="Output")
                    text_sbar_docx_button = gr.Button("Save SBAR .docx File")
                    text_sbar_docx_output = gr.Textbox(label="Output")

    with gr.Tab("SOAP and SBAR Queries"):
        with gr.Column(elem_classes="col"):

            with gr.Tab("Query SOAP Reports"):
                with gr.Column():
                    soap_refresh_button = gr.Button("Refresh")
                    ask_soap_input = gr.Dropdown(label="Choose File")
                    ask_soap_question = gr.Textbox(label="Enter Question here")
                    ask_soap_button = gr.Button("Submit")
                    ask_soap_output = gr.Textbox(label="Output")

            with gr.Tab("Query SBAR Reports"):
                with gr.Column():
                    sbar_refresh_button = gr.Button("Refresh")
                    ask_sbar_input = gr.Dropdown(label="Choose File")
                    ask_sbar_question = gr.Textbox(label="Enter Question here")
                    ask_sbar_button = gr.Button("Submit")
                    ask_sbar_output = gr.Textbox(label="Output")

    with gr.Tab("All Queries"):
        with gr.Column(elem_classes="col"):
            local_search_input = gr.Textbox(label="Enter Question here")
            local_search_button = gr.Button("Search")
            local_search_output = gr.Textbox(label="Output")
            local_gpt_button = gr.Button("Ask ChatGPT")
            local_gpt_output = gr.Textbox(label="Output")

    with gr.Tab("Documents Queries"):
        with gr.Column(elem_classes="col"):

            with gr.Tab("Upload and Process Documents"):
                with gr.Column():
                    file_upload_input = gr.Files(label="Upload File(s) here")
                    file_upload_button = gr.Button("Upload")
                    file_upload_output = gr.Textbox(label="Output")
                    file_process_button = gr.Button("Process")
                    file_process_output = gr.Textbox(label="Output")

            with gr.Tab("Query Documents"):
                with gr.Column():
                    file_search_input = gr.Textbox(label="Enter Question here")
                    file_search_button = gr.Button("Search")
                    file_search_output = gr.Textbox(label="Output")
                    search_gpt_button = gr.Button("Ask ChatGPT")
                    search_gpt_output = gr.Textbox(label="Output")
                    file_delete_button = gr.Button("Delete")
                    file_delete_output = gr.Textbox(label="Output")

    ######################################################################################################
    file_upload_button.click(save_file, inputs=file_upload_input, outputs=file_upload_output)
    file_process_button.click(process_file, inputs=None, outputs=file_process_output)
    file_search_button.click(search_file, inputs=file_search_input, outputs=file_search_output)
    search_gpt_button.click(search_gpt, inputs=file_search_input, outputs=search_gpt_output)
    file_delete_button.click(delete_file, inputs=None, outputs=file_delete_output)
    ######################################################################################################
    local_search_button.click(local_search, inputs=local_search_input, outputs=local_search_output)
    local_gpt_button.click(local_gpt, inputs=local_search_input, outputs=local_gpt_output)
    ######################################################################################################
    soap_refresh_button.click(soap_refresh, inputs=None, outputs=ask_soap_input)
    ask_soap_button.click(ask_soap, inputs=[ask_soap_input, ask_soap_question], outputs=ask_soap_output)
    sbar_refresh_button.click(sbar_refresh, inputs=None, outputs=ask_sbar_input)
    ask_sbar_button.click(ask_sbar, inputs=[ask_sbar_input, ask_sbar_question], outputs=ask_sbar_output)
    ######################################################################################################
    mic_audio_button.click(audio_text, inputs=mic_audio_input, outputs=mic_audio_output)
    mic_text_soap_button.click(text_soap, inputs=None, outputs=mic_text_soap_output)
    mic_text_sbar_button.click(text_sbar, inputs=None, outputs=mic_text_sbar_output)
    mic_soap_docx_button.click(soap_docx, inputs=mic_docx_input, outputs=mic_soap_docx_output)
    mic_sbar_docx_button.click(sbar_docx, inputs=mic_docx_input, outputs=mic_sbar_docx_output)
    ######################################################################################################
    upload_audio_button.click(audio_text, inputs=upload_audio_input, outputs=upload_audio_output)
    upload_text_soap_button.click(text_soap, inputs=None, outputs=upload_text_soap_output)
    upload_text_sbar_button.click(text_sbar, inputs=None, outputs=upload_text_sbar_output)
    upload_soap_docx_button.click(soap_docx, inputs=upload_docx_input, outputs=upload_soap_docx_output)
    upload_sbar_docx_button.click(sbar_docx, inputs=upload_docx_input, outputs=upload_sbar_docx_output)
    ######################################################################################################
    text_text_soap_button.click(transcript_soap, inputs=text_transcript_input, outputs=text_text_soap_output)
    text_text_sbar_button.click(transcript_sbar, inputs=text_transcript_input, outputs=text_text_sbar_output)
    text_soap_docx_button.click(soap_docx, inputs=text_docx_input, outputs=text_soap_docx_output)
    text_sbar_docx_button.click(sbar_docx, inputs=text_docx_input, outputs=text_sbar_docx_output)
    ######################################################################################################

demo.queue()
demo.launch()