Sergidev committed on
Commit
201275c
1 Parent(s): 2f93e4b

Update modules/pmbl.py

Browse files
Files changed (1) hide show
  1. modules/pmbl.py +2 -2
modules/pmbl.py CHANGED
@@ -104,7 +104,7 @@ class PMBL:
104
  for chunk in response.result():
105
  yield chunk
106
 
107
- @spaces.GPU
108
  def generate_response_task(self, system_prompt, prompt, n_ctx):
109
  llm = Llama(model_path=self.model_path, n_ctx=n_ctx, n_threads=8, n_gpu_layers=-1, mlock=True)
110
  llm = llm.to("cuda") # Move the model to the GPU
@@ -152,7 +152,7 @@ class PMBL:
152
 
153
  conn.close()
154
 
155
- @spaces.GPU
156
  def generate_topic(self, prompt, response):
157
  llm = Llama(model_path=self.model_path, n_ctx=1690, n_threads=8, n_gpu_layers=-1, mlock=True)
158
  llm = llm.to("cuda") # Move the model to the GPU
 
104
  for chunk in response.result():
105
  yield chunk
106
 
107
+
108
  def generate_response_task(self, system_prompt, prompt, n_ctx):
109
  llm = Llama(model_path=self.model_path, n_ctx=n_ctx, n_threads=8, n_gpu_layers=-1, mlock=True)
110
  llm = llm.to("cuda") # Move the model to the GPU
 
152
 
153
  conn.close()
154
 
155
+
156
  def generate_topic(self, prompt, response):
157
  llm = Llama(model_path=self.model_path, n_ctx=1690, n_threads=8, n_gpu_layers=-1, mlock=True)
158
  llm = llm.to("cuda") # Move the model to the GPU