Update modules/pmbl.py
modules/pmbl.py  +2 -3  (CHANGED)
@@ -2,7 +2,6 @@ import sqlite3
 from datetime import datetime
 from llama_cpp import Llama
 from concurrent.futures import ThreadPoolExecutor
-import spaces
 
 class PMBL:
     def __init__(self, model_path):
@@ -102,7 +101,7 @@ class PMBL:
         for chunk in response.result():
             yield chunk
 
-
+
     def generate_response_task(self, system_prompt, prompt, n_ctx):
         llm = Llama(model_path=self.model_path, n_ctx=n_ctx, n_threads=8, mlock=True, n_gpu_layers=42)
 
@@ -149,7 +148,7 @@ class PMBL:
 
         conn.close()
 
-
+
    def generate_topic(self, prompt, response):
        llm = Llama(model_path=self.model_path, n_ctx=1690, n_threads=8, mlock=True, n_gpu_layers=42)
 
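For context, a minimal sketch of the threaded streaming pattern these hunks touch: a caller (here named generate_response, which is not shown in the diff and is assumed) submits generate_response_task to a ThreadPoolExecutor and yields the chunks from the resulting future, matching the "for chunk in response.result(): yield chunk" lines above. Only the Llama constructor arguments and the two method signatures come from the diff; the prompt format, executor setup, and token limit are illustrative assumptions, not the repository's actual implementation.

# Sketch only; names and values not shown in the diff are assumptions.
from concurrent.futures import ThreadPoolExecutor
from llama_cpp import Llama

class PMBL:
    def __init__(self, model_path):
        self.model_path = model_path
        self.executor = ThreadPoolExecutor(max_workers=1)  # assumed

    def generate_response(self, system_prompt, prompt, n_ctx=1690):
        # Run the blocking llama.cpp call off the calling thread, then
        # yield the collected chunks (mirrors the diff's response.result() loop).
        response = self.executor.submit(
            self.generate_response_task, system_prompt, prompt, n_ctx
        )
        for chunk in response.result():
            yield chunk

    def generate_response_task(self, system_prompt, prompt, n_ctx):
        # Constructor arguments as they appear in the diff.
        llm = Llama(model_path=self.model_path, n_ctx=n_ctx, n_threads=8,
                    mlock=True, n_gpu_layers=42)
        output = llm(
            f"{system_prompt}\nUser: {prompt}\nAssistant:",  # prompt format assumed
            max_tokens=1500,  # assumed
            stream=True,
        )
        # llama-cpp-python streams completion chunks; keep only the text pieces.
        return [chunk["choices"][0]["text"] for chunk in output]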