Update README.md

#1
by sixsixcoder - opened
Files changed (1)
  1. README.md +32 -0
README.md CHANGED
@@ -136,6 +136,38 @@ print(tokenizer.decode(out[0][input_len:], skip_special_tokens=True))
 
 ```
 
+### Inference with the vLLM library (0.6.4 and later)
+
+```python
+from transformers import AutoTokenizer
+from vllm import LLM, SamplingParams
+
+# GLM-4-9B-Chat-1M
+# max_model_len, tp_size = 1048576, 4
+# If you run into out-of-memory (OOM) errors, reduce max_model_len or increase tp_size
+max_model_len, tp_size = 131072, 1
+model_name = "THUDM/glm-4-9b-chat-hf"
+prompt = [{"role": "user", "content": "what is your name?"}]
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+llm = LLM(
+    model=model_name,
+    tensor_parallel_size=tp_size,
+    max_model_len=max_model_len,
+    trust_remote_code=True,
+    enforce_eager=True,
+    # For GLM-4-9B-Chat-1M-HF: if you run into OOM errors, enable the following parameters
+    # enable_chunked_prefill=True,
+    # max_num_batched_tokens=8192
+)
+stop_token_ids = [151329, 151336, 151338]
+sampling_params = SamplingParams(temperature=0.95, max_tokens=1024, stop_token_ids=stop_token_ids)
+
+inputs = tokenizer.apply_chat_template(prompt, tokenize=False, add_generation_prompt=True)
+outputs = llm.generate(prompts=inputs, sampling_params=sampling_params)
+
+print(outputs[0].outputs[0].text)
+```
 ## LICENSE
 
 The weights of the GLM-4 model are available under the terms of [LICENSE](LICENSE).
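As a usage note on the added snippet: recent vLLM releases (including the 0.6.x line this section targets) also expose `LLM.chat()`, which applies the model's chat template internally, so the explicit `AutoTokenizer` / `apply_chat_template` step can be dropped. A minimal sketch under that assumption, reusing the model name, limits, and sampling settings from the diff above:

```python
from vllm import LLM, SamplingParams

# Same model and limits as in the added snippet above
llm = LLM(
    model="THUDM/glm-4-9b-chat-hf",
    tensor_parallel_size=1,
    max_model_len=131072,
    trust_remote_code=True,
    enforce_eager=True,
)

# Stop token ids and sampling settings copied from the snippet above
sampling_params = SamplingParams(
    temperature=0.95,
    max_tokens=1024,
    stop_token_ids=[151329, 151336, 151338],
)

# LLM.chat() applies the chat template itself
# (assumes a vLLM version that ships this method)
messages = [{"role": "user", "content": "what is your name?"}]
outputs = llm.chat(messages, sampling_params)
print(outputs[0].outputs[0].text)
```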