Abbeite committed
Commit 99d8abc · verified · 1 Parent(s): c8ec4b7

Update app.py

Files changed (1)
  1. app.py +33 -17
app.py CHANGED
@@ -1,26 +1,42 @@
 import streamlit as st
-from transformers import pipeline
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
 # Title of the web application
 st.title('Chest and Physical Limitations LLM Query')
 
-# Initialize the model pipeline
-# Ensure to use the correct model identifier
+# Initialize variables
 model_name = "Abbeite/chest_and_physical_limitations"
-generator = pipeline('text-generation', model=model_name)
+generator = None
+
+# Function to check model existence and load it
+def load_model(model_name):
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(model_name)
+        generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
+        return generator
+    except Exception as e:
+        st.error(f"Failed to load model {model_name}: {str(e)}")
+        return None
+
+# Load the model
+generator = load_model(model_name)
 
 # User prompt input
-user_prompt = st.text_area("Enter your prompt here:")
+if generator:  # Proceed only if the model is successfully loaded
+    user_prompt = st.text_area("Enter your prompt here:")
 
-# Button to generate text
-if st.button('Generate'):
-    if user_prompt:
-        # Generate response
-        try:
-            response = generator(user_prompt, max_length=50, clean_up_tokenization_spaces=True)
-            # Display the generated text
-            st.text_area("Response:", value=response[0]['generated_text'], height=250, disabled=True)
-        except Exception as e:
-            st.error(f"Error generating response: {str(e)}")
-    else:
-        st.warning("Please enter a prompt.")
+    # Button to generate text
+    if st.button('Generate'):
+        if user_prompt:
+            # Generate response
+            try:
+                response = generator(user_prompt, max_length=50, clean_up_tokenization_spaces=True)
+                # Display the generated text
+                st.text_area("Response:", value=response[0]['generated_text'], height=250, disabled=True)
+            except Exception as e:
+                st.error(f"Error generating response: {str(e)}")
+        else:
+            st.warning("Please enter a prompt.")
+else:
+    st.error("Model could not be loaded. Please ensure the model name is correct and try again.")