import streamlit as st
from datasets import load_dataset

# Download (and cache) the tweet datasets; the app does not use them yet.
load_dataset("juanberasategui/Crypto_Tweets")
load_dataset("Ghosthash/Tweets")
# Earlier approach, kept for reference: load the model and tokenizer manually,
# apply the chat template, and call generate() directly.
# from transformers import AutoModelForCausalLM, AutoTokenizer
# model_path = "cognitivecomputations/dolphin-2.8-mistral-7b-v02"
# tokenizer = AutoTokenizer.from_pretrained(model_path)
# model = AutoModelForCausalLM.from_pretrained(
#     model_path,
#     device_map="auto",
#     torch_dtype='auto'
# ).eval()
# text = st.text_input("enter text here")
# if text:
#     messages = [
#         {"role": "user", "content": text},
#     ]
#     input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
#     output_ids = model.generate(input_ids.to('cuda'))
#     response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
#     print(response)
#     st.json({
#         "response": response
#     })
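# The commented block above applies the model's chat template manually. A
# hedged sketch of the same idea with the pipeline API used below (recent
# transformers versions let a text-generation pipeline accept a list of chat
# messages directly; uncomment inside the `if text:` block to try it):
# response = pipe([{"role": "user", "content": text}], max_new_tokens=1000)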
from transformers import pipeline

# device=0 targets the first GPU; the original used device=1, which assumes a
# second GPU and fails on a single-GPU machine.
pipe = pipeline("text-generation", model="cognitivecomputations/dolphin-2.8-mistral-7b-v02", device=0)
text = st.text_input("enter text here")
if text:
    response = pipe(text, max_new_tokens=1000)
    st.json(response)
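# As written, the pipeline above is rebuilt on every Streamlit rerun (each
# widget interaction), reloading the 7B model. A minimal sketch of the usual
# fix, caching it with st.cache_resource:
#
# @st.cache_resource
# def get_pipe():
#     return pipeline("text-generation",
#                     model="cognitivecomputations/dolphin-2.8-mistral-7b-v02",
#                     device=0)
#
# pipe = get_pipe()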