---
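# LLaVA model definition; config_file holds the backend configuration applied when the model is loaded.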
name: "llava"

config_file: |
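  # llama.cpp backend: 4096-token context window, 16-bit (f16) precision,
  # and memory-mapped (mmap) model loading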
  backend: llama-cpp
  context_size: 4096
  f16: true

  mmap: true
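  # Prompt prefixes prepended for each chat role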
  roles:
    user: "USER:"
    assistant: "ASSISTANT:"
    system: "SYSTEM:"

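  # Go-style prompt template; {{.Input}} is replaced with the prompt built from the chat messages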
  template:
    chat: |
      A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
      {{.Input}}
      ASSISTANT: