import streamlit as st
from huggingface_hub import InferenceClient
import os

st.title("CODEFUSSION ☄")  

base_url = "https://api-inference.huggingface.co/models/"
API_KEY = os.environ.get('HUGGINGFACE_API_KEY')  # Hugging Face token, passed to InferenceClient below

model_links = {
    "LegacyLift🚀": base_url + "mistralai/Mistral-7B-Instruct-v0.2",  
    "ModernMigrate⭐": base_url + "mistralai/Mixtral-8x7B-Instruct-v0.1",  
    "RetroRecode🔄": base_url + "microsoft/Phi-3-mini-4k-instruct" 
}
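# Each value is a full Serverless Inference API URL; InferenceClient accepts either a model id
# or such a URL as its `model` argument.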

model_info = {
    "LegacyLift🚀": {
        'description': (
            "The LegacyLift model is a **Large Language Model (LLM)** designed for "
            "question-and-answer interactions.\n\n"
            "This model is best for minimal problem-solving, content writing, and daily tips.\n"
        ),
        'logo': './11.jpg'
    },
    "ModernMigrate⭐": {
        'description': (
            "The ModernMigrate model is a **Large Language Model (LLM)** designed for "
            "question-and-answer interactions.\n\n"
            "This model excels in coding, logical reasoning, and high-speed inference.\n"
        ),
        'logo': './2.jpg'
    },
    "RetroRecode🔄": {
        'description': (
            "The RetroRecode model is a **Large Language Model (LLM)** designed for "
            "question-and-answer interactions.\n\n"
            "This model is best suited for critical development, practical knowledge, and serverless inference.\n"
        ),
        'logo': './3.jpg'
    },
}

def format_prompt(message, conversation_history, custom_instructions=None):
    '''
    Builds a single prompt string: optional [INST] custom instructions, the
    conversation history wrapped in [CONV_HISTORY] tags, then the new message.
    '''
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    
    prompt += "[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += "[/CONV_HISTORY]"
    
    prompt += f"[INST] {message} [/INST]"
    return prompt
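# Illustrative example (assumed inputs): with custom_instructions="Act like a Human in conversation",
# conversation_history=[("user", "hi")] and message="hi", the function returns
#   "[INST] Act like a Human in conversation [/INST][CONV_HISTORY]\nUSER: hi\n[/CONV_HISTORY][INST] hi [/INST]"
# Note that the caller appends the new message to conversation_history before calling this,
# so the latest user turn appears both inside [CONV_HISTORY] and in the final [INST] block.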

def reset_conversation():
    '''
    Resets Conversation
    '''
    st.session_state.messages = []
    st.session_state.conversation_history = []

if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.conversation_history = []
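# Streamlit reruns the whole script on every interaction; st.session_state persists across reruns,
# so the lists above keep the chat history between user messages.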

models = list(model_links.keys())

selected_model = st.sidebar.selectbox("Select Model", models)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
st.sidebar.button('Reset Chat', on_click=reset_conversation)

st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("*Generating the code might go slow if you are using low power resources *")

if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    st.session_state.conversation_history = []
    st.session_state.prev_option = selected_model
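# Switching models clears the stored conversation so one model's replies are not replayed
# as context for a different model.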

repo_id = model_links[selected_model]

st.subheader(f'{selected_model}')

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input(f"Hi, I'm {selected_model}. How can I help you today?"):
    custom_instruction = "Act like a Human in conversation"
    with st.chat_message("user"):
        st.markdown(prompt)
    
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.session_state.conversation_history.append(("user", prompt))

    formatted_text = format_prompt(prompt, st.session_state.conversation_history, custom_instruction)

    with st.chat_message("assistant"):
        client = InferenceClient(model=model_links[selected_model])
        output = client.text_generation(formatted_text, temperature=temp_values, max_new_tokens=3000, stream=True)
        response = "".join([chunk for chunk in output])
        st.markdown(response)
    
    st.session_state.messages.append({"role": "assistant", "content": response})
    st.session_state.conversation_history.append(("assistant", response))
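
# Minimal sketch of how to run this app locally (assuming this file is saved as app.py
# and HUGGINGFACE_API_KEY is exported in the environment):
#   streamlit run app.py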