subashini7 committed
Commit 976eddd · verified · 1 Parent(s): e2742cd

Upload 2 files

Files changed (2)
  1. app.py +39 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import streamlit as st
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+
+ # Load the model and tokenizer once; st.cache_resource keeps them across reruns
+ @st.cache_resource
+ def load_model():
+     model_name = "prithivMLmods/QwQ-LCoT-14B-Conversational"
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         device_map="auto",  # Automatically place layers on CPU/GPU (needs accelerate)
+         torch_dtype=torch.float16,  # Half precision to fit the large model
+     )
+     return tokenizer, model
+
+ # Load resources
+ tokenizer, model = load_model()
+
+ # Streamlit app UI
+ st.title("QwQ-LCoT Chatbot")
+ st.write("A conversational AI powered by QwQ-LCoT-14B. Ask me anything!")
+
+ # User input
+ user_input = st.text_input("You: ", "")
+
+ if st.button("Send"):
+     if user_input.strip():
+         with st.spinner("Generating response..."):
+             # Tokenize input and move it to the model's device
+             inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
+             # Generate response (do_sample=True so temperature takes effect)
+             outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True, temperature=0.7)
+             # Decode response
+             response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+         # Display response
+         st.text_area("Bot:", value=response, height=150)
+
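Since the checkpoint is a conversational model, it will usually respond better to a chat-formatted prompt than to raw text passed straight to generate. A minimal sketch of that variant, assuming the tokenizer ships a chat template (apply_chat_template is standard transformers API, but not every checkpoint defines a template):

messages = [{"role": "user", "content": user_input}]
# Build the model-specific chat prompt; add_generation_prompt opens the assistant turn
prompt_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(prompt_ids, max_new_tokens=150, do_sample=True, temperature=0.7)
# Decode only the newly generated tokens, skipping the echoed prompt
response = tokenizer.decode(outputs[0][prompt_ids.shape[-1]:], skip_special_tokens=True)

Slicing off prompt_ids before decoding also keeps the user's prompt out of the displayed reply, which the raw-text version above does not.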
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ transformers
+ torch
+ streamlit
+ accelerate
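With both files in place, the app starts locally with: streamlit run app.py. Note that a 14B-parameter model in float16 needs roughly 28 GB for the weights alone, so a suitably large GPU (or accelerate's CPU offloading via device_map="auto") is effectively required.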