|
---
license: apache-2.0
---
|
|
|
![image/webp](https://cdn-uploads.huggingface.co/production/uploads/6455cc8d679315e4ef16fbec/wZ0eCzTn2CzYB44cmaE6L.webp) |
|
|
|
This model was requested for further testing; see the original [thread](https://huggingface.co/macadeliccc/Laser-WestLake-2x7b/discussions/1).
|
|
|
## Code example |
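
The snippet below loads the model with 4-bit quantization (which requires the `bitsandbytes` package and a CUDA-capable GPU) and generates a completion for a single prompt.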
|
|
|
```python
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_response(prompt):
    """
    Generate a response from the model based on the input prompt.

    Args:
        prompt (str): Prompt for the model.

    Returns:
        str: The generated response from the model.
    """
    # Tokenize the input prompt and move it to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate output tokens
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )

    # Decode the generated tokens to a string
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return response


# Load the model and tokenizer
model_id = "macadeliccc/KunoichiLake-2x7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True)

prompt = "Write a quicksort algorithm in python"

# Generate and print the response
print("Response:")
print(generate_response(prompt), "\n")
```
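
If the merged model inherits a chat template from its base tokenizer (an assumption; check the model's `tokenizer_config.json`), chat-style prompts can be formatted with `apply_chat_template` and passed to the same `generate_response` helper. A minimal sketch:

```python
# Hypothetical usage sketch: assumes the tokenizer ships a chat template;
# reuses the generate_response helper defined above.
messages = [{"role": "user", "content": "Write a quicksort algorithm in python"}]
chat_prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

print("Chat response:")
print(generate_response(chat_prompt), "\n")
```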