import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel
from huggingface_hub import login
import os
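
# Optional (assumption): if the adapter repository on the Hub is gated or private, authenticate
# first, e.g. login(token=os.environ.get("HF_TOKEN")); otherwise the `login`/`os` imports go unused.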


@st.cache_resource
def load_model():
    # Cache the model and tokenizer so they are loaded only once per Streamlit server process.
    base_model_name = "Qwen/Qwen2.5-Coder-3B-Instruct"
    adapter_model_name = "mohamedyd/Natural-Coder-3B-Instruct-V1"

    # Load the base model, then attach the fine-tuned PEFT adapter weights on top of it.
    base_model = AutoModelForCausalLM.from_pretrained(base_model_name, trust_remote_code=True)
    config = PeftConfig.from_pretrained(adapter_model_name)
    model = PeftModel.from_pretrained(base_model, adapter_model_name, config=config)

    # The adapter reuses the base model's vocabulary, so the base tokenizer is loaded as-is.
    tokenizer = AutoTokenizer.from_pretrained(base_model_name, trust_remote_code=True)

    return model, tokenizer


model, tokenizer = load_model()
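
# Assumption: inference runs on CPU by default. With a GPU and the `accelerate` package
# installed, the model could instead be loaded with device_map="auto" and a half-precision
# torch_dtype inside load_model() to speed up generation.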
st.title("Natural-Coder-3B-Instruct-V1 Model Interaction")

user_input = st.text_area("Enter your prompt here:", height=150)
if st.button("Generate Response"):
    if user_input:
        # Tokenize the prompt, generate a completion, and decode it back into text.
        inputs = tokenizer(user_input, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=512, num_return_sequences=1)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.write("Model Response:")
        st.write(response)
    else:
        st.write("Please enter a prompt to generate a response.")
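
# To run the app locally (assuming this script is saved as app.py):
#   streamlit run app.py
#
# Note (assumption): since the model is instruct-tuned, responses may improve if the prompt is
# wrapped with tokenizer.apply_chat_template(...) before tokenizing, rather than passed in as
# plain text.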