zac committed
Commit db1f513 • 0 Parent(s)

Duplicate from zac/llama-cpp-python

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +51 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
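These rules route binary artifacts (model weights, archives, serialized arrays) through Git LFS, keeping the repository itself small while large files live in LFS storage. As a rough sketch of what the globs decide (the is_lfs_tracked helper is hypothetical, and real Git attribute matching also handles directory rules like saved_model/**/* that plain fnmatch does not):

from fnmatch import fnmatch

# A few of the patterns declared in .gitattributes above.
LFS_PATTERNS = ["*.bin", "*.safetensors", "*.pt", "*.zip"]

def is_lfs_tracked(path: str) -> bool:
    # Hypothetical helper mimicking how Git decides which files
    # go through the LFS filter instead of plain Git storage.
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

assert is_lfs_tracked("dolphin-llama2-7b.ggmlv3.q4_1.bin")
assert not is_lfs_tracked("app.py")

The GGML weights that app.py downloads at runtime would match *.bin if they were ever committed to this repo.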
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Llama Cpp Python (LLM on CPU)
+ emoji: 👩‍💻
+ colorFrom: pink
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.37.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: zac/llama-cpp-python
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,51 @@
+ import gradio as gr
+ import copy
+ from llama_cpp import Llama
+ from huggingface_hub import hf_hub_download  # fetch model files from the Hugging Face Hub
+
+
+ # Download the quantized GGML weights from the Hub; n_ctx=2048 gives a larger context window.
+ llm = Llama(
+     model_path=hf_hub_download(
+         repo_id="TheBloke/Dolphin-Llama2-7B-GGML",
+         filename="dolphin-llama2-7b.ggmlv3.q4_1.bin",
+     ),
+     n_ctx=2048,
+ )
+
+ history = []
+
+ pre_prompt = " The user and the AI are having a conversation : <|endoftext|> \n "
+
+ def generate_text(input_text, history):
+     print("history:", history)
+     print("input:", input_text)
+     temp = ""
+     if history == []:
+         input_text_with_history = f"SYSTEM:{pre_prompt}" + "\n" + f"USER: {input_text} " + "\n" + " ASSISTANT:"
+     else:
+         input_text_with_history = f"{history[-1][1]}" + "\n"
+         input_text_with_history += f"USER: {input_text}" + "\n" + " ASSISTANT:"
+     print("new input:", input_text_with_history)
+     output = llm(
+         input_text_with_history,
+         max_tokens=1024,
+         stop=["<|prompter|>", "<|endoftext|>", "<|endoftext|> \n", "ASSISTANT:", "USER:", "SYSTEM:"],
+         stream=True,
+     )
+     # Stream the response: accumulate each chunk and yield the growing string so the UI updates live.
+     for out in output:
+         stream = copy.deepcopy(out)
+         print(stream["choices"][0]["text"])
+         temp += stream["choices"][0]["text"]
+         yield temp
+
+
+ demo = gr.ChatInterface(
+     generate_text,
+     title="LLM on CPU",
+     description="Running an LLM with https://github.com/abetlen/llama-cpp-python. By the way, text streaming was the hardest part to implement.",
+     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
+     cache_examples=True,
+     retry_btn=None,
+     undo_btn="Delete Previous",
+     clear_btn="Clear",
+ )
+ demo.queue(concurrency_count=1, max_size=5)
+ demo.launch()
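The description string above calls text streaming the hardest part to implement, and the mechanic is worth isolating: gr.ChatInterface accepts a generator function, and each yield replaces the in-progress bot message, which is why generate_text yields the accumulated string rather than individual chunks. A minimal self-contained sketch of the same pattern, with a hypothetical fake_stream standing in for llm(..., stream=True) so it runs without downloading the model:

import gradio as gr

def fake_stream(prompt):
    # Hypothetical stand-in for llm(prompt, stream=True): yields chunks
    # shaped like llama-cpp-python's streaming completion output.
    for word in f"Echoing: {prompt}".split():
        yield {"choices": [{"text": word + " "}]}

def generate_text(message, history):
    # Same pattern as app.py: accumulate each chunk and yield the growing
    # string; ChatInterface re-renders the bot message on every yield.
    partial = ""
    for chunk in fake_stream(message):
        partial += chunk["choices"][0]["text"]
        yield partial

if __name__ == "__main__":
    gr.ChatInterface(generate_text).launch()

Yielding the running total rather than each delta is the key detail: ChatInterface treats every yielded value as the full message so far.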
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ llama-cpp-python
+ huggingface_hub
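Both dependencies are exercised at startup: huggingface_hub resolves and caches the model weights, then llama-cpp-python loads them. A minimal sketch of the download step on its own (the q4_1 file is several gigabytes, so the first call is slow; later calls return the cached path immediately):

from huggingface_hub import hf_hub_download

# Resolve (and cache) the quantized weights that app.py loads.
model_path = hf_hub_download(
    repo_id="TheBloke/Dolphin-Llama2-7B-GGML",
    filename="dolphin-llama2-7b.ggmlv3.q4_1.bin",
)
print(model_path)  # local path inside the Hub cache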