Sergidev committed
Commit 8d9af98
Parent: 9e5bdd2

Update modules/pmbl.py

Files changed (1): modules/pmbl.py (+15 -15)
modules/pmbl.py CHANGED
@@ -103,15 +103,15 @@ class PMBL:
 
     def generate_response_task(self, system_prompt, prompt, n_ctx):
-        llm = Llama(
-            model_path=self.model_path,
-            n_ctx=n_ctx,
-            n_threads=8,
-            n_gpu_layers=-1,
-            use_mlock=True,
-            use_mmap=True,
-            use_flash_attn=True
-        )
+        llm = Llama(
+            model_path=self.model_path,
+            n_ctx=n_ctx,
+            n_threads=8,
+            n_gpu_layers=-1,
+            use_mlock=True,
+            use_mmap=True,
+            use_flash_attn=True
+        )
 
         response = llm.generate(
             system_prompt,
@@ -156,12 +156,12 @@ class PMBL:
         conn.close()
 
     def generate_topic(self, prompt, response):
-        llm = Llama(
-            model_path=self.model_path,
-            n_ctx=n_ctx,
-            n_threads=2,
-            n_gpu_layers=2,
-        )
+        llm = Llama(
+            model_path=self.model_path,
+            n_ctx=n_ctx,
+            n_threads=2,
+            n_gpu_layers=2,
+        )
 
         system_prompt = f"Based on the following interaction between a user and an AI assistant, generate a concise topic for the conversation in 2-4 words:\n\nUser: {prompt}\nAssistant: {response}\n\nTopic:"
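
Both hunks rebuild a fresh llama-cpp-python Llama instance inside the method that uses it: a heavy profile for chat responses (all layers offloaded to GPU, locked and memory-mapped weights, flash attention) and a light one for topic labelling (2 threads, 2 GPU layers). The sketch below is a minimal, hypothetical reconstruction of those two profiles, not the committed file: it assumes the llama-cpp-python Llama API, spells the flash-attention flag flash_attn (the keyword current llama-cpp-python releases accept; the commit writes use_flash_attn), and adds an explicit topic_n_ctx parameter because, as the second hunk shows, generate_topic references n_ctx without defining it in that scope.

    # Hypothetical sketch, not the committed code. Assumes the
    # llama-cpp-python API; `topic_n_ctx` and the default 2048 are
    # illustrative, and `flash_attn` is the current llama-cpp-python
    # spelling of the flag the commit calls `use_flash_attn`.
    from llama_cpp import Llama

    def build_response_llm(model_path: str, n_ctx: int) -> Llama:
        # Heavy profile: offload all layers (-1), lock and memory-map
        # the weights, enable flash attention.
        return Llama(
            model_path=model_path,
            n_ctx=n_ctx,
            n_threads=8,
            n_gpu_layers=-1,
            use_mlock=True,
            use_mmap=True,
            flash_attn=True,
        )

    def build_topic_llm(model_path: str, topic_n_ctx: int = 2048) -> Llama:
        # Light profile for short topic generation: 2 CPU threads and
        # only 2 layers on the GPU. Passing the context size in
        # explicitly closes the undefined-n_ctx gap visible in the diff.
        return Llama(
            model_path=model_path,
            n_ctx=topic_n_ctx,
            n_threads=2,
            n_gpu_layers=2,
        )

One design note on this pattern: constructing a new Llama per call reloads the model every time; with use_mmap=True a warm page cache softens the cost, but keeping one long-lived instance per profile would avoid the reload entirely.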