File size: 956 Bytes
5f87533 7f51a5a 5f87533 7f51a5a 2856ccc 44131c9 5f87533 9861a8a b61717e 9861a8a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 |
import os
import time

import bitsandbytes as bnb
import numpy as np
import pandas as pd
import streamlit as st
import torch
from huggingface_hub import login
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments, Trainer, pipeline
# --- Streamlit page setup -------------------------------------------------
st.set_page_config(
    page_title="Code Generation",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# --- Hugging Face Hub authentication --------------------------------------
# SECURITY: a real access token was previously hard-coded here (and written
# into the git credential store). A token committed to source control is
# leaked and must be revoked on huggingface.co. Read it from the
# environment instead and degrade gracefully when it is absent.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token, add_to_git_credential=True)
else:
    st.warning("HF_TOKEN is not set; downloads of gated/private models may fail.")

# Model to load. NOTE(review): the page previously advertised
# 'TinyPixel/Llama-2-7B-bf16-sharded' while actually loading this repo —
# display the real model name to keep UI and behavior consistent.
model_name = 'red1xe/Llama-2-7B-codeGPT'

st.title("Code Generation")
st.write(f'MODEL: {model_name}')

# 4-bit NF4 quantization with double quantization; computation runs in
# bfloat16. Cuts the 7B model's memory footprint to roughly a quarter.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)


@st.cache_resource(show_spinner="Loading model and tokenizer ...")
def _load_model(name: str):
    """Load the tokenizer and 4-bit quantized model for *name*.

    Cached with st.cache_resource so the multi-GB model is downloaded and
    instantiated once per server process, not on every Streamlit rerun.
    """
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForCausalLM.from_pretrained(name, quantization_config=bnb_config)
    return tok, mdl


tokenizer, model = _load_model(model_name)
|