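# Streamlit chat app showcasing text generation with the tiiuae/falcon-11B model.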
import streamlit as st
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

st.set_page_config(
    page_title="Falcon 11B"
)

st.title("Falcon 11B Showcase")

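# Load the model, tokenizer, and generation pipeline once and cache them across reruns.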
@st.cache_resource
def Chat_model():
    model_name = "tiiuae/falcon-11B"
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    pipeline = transformers.pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )
    return pipeline, tokenizer

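# Run the generation pipeline on the user's prompt and return the generated text.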
def get_text_output(user_input, pipeline, tokenizer):
    sequences = pipeline(
        user_input,
        max_length=200,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )
    return sequences[0]["generated_text"]

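# Initialise the chat history and the cached model/tokenizer in session state.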
# if "Falcon_messages" not in st.session_state:
# st.session_state.Falcon_messages = []
#
# if "Falcon_model" not in st.session_state:
# st.session_state.Falcon_model,st.session_state.tokeniser = Chat_model()
#
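# Replay the stored conversation so earlier messages persist across reruns.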
for message in st.session_state.Falcon_messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

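# Handle a new prompt: echo it, generate an assistant reply, and record both in the history.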
if prompt := st.chat_input("What is up?"):
    st.session_state.Falcon_messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        response = get_text_output(prompt, st.session_state.Falcon_model, st.session_state.tokeniser)
        st.markdown(response)
    st.session_state.Falcon_messages.append({"role": "assistant", "content": response})