use cpu temporarily
modules/pmbl.py  +2 -2
@@ -106,7 +106,7 @@ class PMBL:
 
 
     def generate_response_task(self, system_prompt, prompt, n_ctx):
-        llm = Llama(model_path=self.model_path, n_ctx=n_ctx, n_threads=8,
+        llm = Llama(model_path=self.model_path, n_ctx=n_ctx, n_threads=8, mlock=True)
         llm = llm.to("cuda")  # Move the model to the GPU
 
         response = llm(
@@ -154,7 +154,7 @@ class PMBL:
 
     @spaces.GPU
     def generate_topic(self, prompt, response):
-        llm = Llama(model_path=self.model_path, n_ctx=1690, n_threads=8,
+        llm = Llama(model_path=self.model_path, n_ctx=1690, n_threads=8, mlock=True)
         llm = llm.to("cuda")  # Move the model to the GPU
 
         system_prompt = f"Based on the following interaction between a user and an AI assistant, generate a concise topic for the conversation in 2-4 words:\n\nUser: {prompt}\nAssistant: {response}\n\nTopic:"
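For context on the change: assuming the Llama above comes from llama-cpp-python, CPU vs. GPU execution is fixed when the object is constructed (via n_gpu_layers), and memory locking is requested with the use_mlock flag; a llama_cpp Llama is not a PyTorch module, so the llm.to("cuda") context lines in the diff would not apply to it. Below is a minimal CPU-only sketch under those assumptions, not this repo's code; the model path is hypothetical.

from llama_cpp import Llama  # assumption: llama-cpp-python

# Minimal CPU-only sketch. "model.gguf" is a hypothetical path. Note the
# upstream keyword is use_mlock; the commit passes mlock=True, which
# llama-cpp-python may ignore or reject as an unknown keyword argument.
llm = Llama(
    model_path="model.gguf",
    n_ctx=1690,
    n_threads=8,
    n_gpu_layers=0,   # 0 = no layers offloaded, i.e. pure CPU inference
    use_mlock=True,   # lock model weights in RAM so they are not swapped out
)

output = llm("Q: What does mlock do? A:", max_tokens=48)
print(output["choices"][0]["text"])

Locking pages with use_mlock trades higher resident memory for steadier latency, which matters most when the model would otherwise be paged out between requests.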