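# Streamlit app: free-form text generation with dolphin-2.8-mistral-7b-v02
# via transformers. Also pre-caches two tweet datasets from the Hugging Face Hub.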
import streamlit as st
from datasets import load_dataset

load_dataset("juanberasategui/Crypto_Tweets")
load_dataset("Ghosthash/Tweets")
# from transformers import AutoModelForCausalLM, AutoTokenizer

# model_path = "cognitivecomputations/dolphin-2.8-mistral-7b-v02"

# tokenizer = AutoTokenizer.from_pretrained(model_path)
# model = AutoModelForCausalLM.from_pretrained(
#     model_path,
#     device_map="auto",
#     torch_dtype='auto'
# ).eval()
# text = st.text_input("enter text here")

# if text:
#     messages = [
#         {"role": "user", "content": text},
#     ]
#     input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
#     output_ids = model.generate(input_ids.to('cuda'))
#     response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
#     print(response)
#     st.json({
#         "response": response
#     })

from transformers import pipeline

# Streamlit reruns the whole script on every interaction, so cache the
# pipeline to keep the 7B model from being reloaded after each input.
# device=1 targets the second GPU; use device=0 on single-GPU machines.
@st.cache_resource
def get_pipe():
    return pipeline("text-generation", model="cognitivecomputations/dolphin-2.8-mistral-7b-v02", device=1)

pipe = get_pipe()
text = st.text_input("enter text here")

if text:
    response = pipe(text, max_new_tokens=1000)
    st.json(response)
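# Run with: streamlit run app.py  (filename assumed; adjust to this script's name)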