VirtualLab committed on
Commit
8f9cb23
·
1 Parent(s): 987064e

Default model

Browse files
Files changed (1) hide show
  1. inference.py +0 -2
inference.py CHANGED
@@ -12,8 +12,6 @@ llm = Llama.from_pretrained(
12
 
13
  @app.route("/v1/completions", methods=["POST"])
14
  def generate():
15
- mem_info = subprocess.run(["cat", "b/proc/meminfo", "|", "grep", "MemTotal"], capture_output=True)
16
- print (mem_info)
17
  data = request.json
18
  prompt = data.get("prompt", "")
19
  max_tokens = data.get("max_tokens", 50)
 
12
 
13
  @app.route("/v1/completions", methods=["POST"])
14
  def generate():
 
 
15
  data = request.json
16
  prompt = data.get("prompt", "")
17
  max_tokens = data.get("max_tokens", 50)