cheberle committed on
Commit 84d29be
Parent(s): 4460d3d
Files changed (1)
  1. app.py +0 -1
app.py CHANGED
@@ -28,7 +28,6 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 # For a standard FP16 or FP32 load (no bitsandbytes):
 model = AutoModelForCausalLM.from_pretrained(
     "cheberle/autotrain-35swc-b4r9z",
-    device_map="auto",           # Device automatically mapped across GPUs or CPU
     torch_dtype=torch.float16,   # Or "auto", or float32
     trust_remote_code=True
 )
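
For context, a minimal sketch of how the model load in app.py looks after this change, assuming torch and transformers are installed; with device_map="auto" removed, device placement is no longer automatic, so the explicit .to(device) step shown here is an assumption, not part of the commit:

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "cheberle/autotrain-35swc-b4r9z",
    torch_dtype=torch.float16,   # Or "auto", or torch.float32
    trust_remote_code=True,
)

# Without device_map="auto", the model loads on CPU by default; move it to a GPU
# manually if one is available (assumed convenience step, not in the original file).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)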