lukestanley committed on
Commit
ff938c3
·
1 Parent(s): f5a3b9d

Fix check for LLM_MODEL_PATH to avoid load error

Browse files
Files changed (1) hide show
  1. utils.py +1 -1
utils.py CHANGED
@@ -23,7 +23,7 @@ in_memory_llm = None
23
  LLM_MODEL_PATH = env.get("LLM_MODEL_PATH", None)
24
  USE_HTTP_SERVER = env.get("USE_HTTP_SERVER", "false").lower() == "true"
25
 
26
- if len(LLM_MODEL_PATH) > 0:
27
  print(f"Using local model from {LLM_MODEL_PATH}")
28
  else:
29
  print("No local LLM_MODEL_PATH environment variable set. We need a model, downloading model from HuggingFace Hub")
 
23
  LLM_MODEL_PATH = env.get("LLM_MODEL_PATH", None)
24
  USE_HTTP_SERVER = env.get("USE_HTTP_SERVER", "false").lower() == "true"
25
 
26
+ if LLM_MODEL_PATH and len(LLM_MODEL_PATH) > 0:
27
  print(f"Using local model from {LLM_MODEL_PATH}")
28
  else:
29
  print("No local LLM_MODEL_PATH environment variable set. We need a model, downloading model from HuggingFace Hub")