waqasali1707 committed on
Commit
09e48aa
·
verified ·
1 Parent(s): 850b6c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -12
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import streamlit as st
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
3
 
4
  # Load model and tokenizer
@@ -14,15 +15,20 @@ text = st.text_area("Enter the starting text for your blog post:")
14
  generation_config = GenerationConfig(max_new_tokens=200, do_sample=True, temperature=0.7)
15
 
16
  if text:
17
- # Encode input
18
- inputs_encoded = tokenizer(text, return_tensors='pt')
19
-
20
- # Generate output
21
- model_output = model.generate(inputs_encoded["input_ids"], generation_config=generation_config)[0]
22
-
23
- # Decode output
24
- output = tokenizer.decode(model_output, skip_special_tokens=True)
25
-
26
- # Display result
27
- st.text("Generated Blog Post:")
28
- st.text(output)
 
 
 
 
 
 
1
  import streamlit as st
2
+ import torch
3
  from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
4
 
5
  # Load model and tokenizer
 
15
# Decoding settings: sample (rather than greedy-decode) at temperature 0.7,
# producing at most 200 freshly generated tokens per request.
generation_config = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    max_new_tokens=200,
)
16
 
17
if text:
    try:
        # Tokenize the prompt; the returned dict contains both
        # "input_ids" and "attention_mask" tensors.
        inputs_encoded = tokenizer(text, return_tensors='pt')

        # Generate without tracking gradients (inference only).
        # Pass the attention_mask along with input_ids so generate() does
        # not have to infer padding positions — omitting it triggers the
        # transformers attention-mask warning and can yield wrong output
        # for padded inputs.
        with torch.no_grad():
            model_output = model.generate(
                inputs_encoded["input_ids"],
                attention_mask=inputs_encoded["attention_mask"],
                generation_config=generation_config,
            )[0]

        # Decode the generated token ids back into text, dropping
        # special tokens (BOS/EOS/pad).
        output = tokenizer.decode(model_output, skip_special_tokens=True)

        # Display result
        st.write("Generated Blog Post:")
        st.write(output)

    except Exception as e:
        # Top-level UI boundary: surface any failure in the Streamlit app
        # instead of crashing it.
        st.error(f"An error occurred: {e}")