Environment: Google Colab, T4 GPU runtime. The first attempt below did not run successfully on the Colab T4 instance — see the traceback that follows.
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the QTIP-quantized Llama-2-7b-chat checkpoint.
#
# BUG FIX: the QTIP repo does not ship tokenizer files, so
# AutoTokenizer.from_pretrained(model_name) raises
# OSError: "Can't load tokenizer for 'relaxml/Llama-2-7b-chat-QTIP-2Bit'".
# Borrow the tokenizer from the sibling relaxml E8P repo, which is built on
# the same Llama-2-7b-chat base and does include tokenizer files.
model_name = "relaxml/Llama-2-7b-chat-QTIP-2Bit"
tokenizer_name = "relaxml/Llama-2-7b-chat-E8P-2Bit"  # same base vocab; has tokenizer files

tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
OSError Traceback (most recent call last)
in <cell line: 5>()
3 model_name = "relaxml/Llama-2-7b-chat-QTIP-2Bit"
4
----> 5 tokenizer = AutoTokenizer.from_pretrained(model_name)
6 model = AutoModelForCausalLM.from_pretrained(model_name)
1 frames
/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py in from_pretrained(cls, pretrained_model_name_or_path, cache_dir, force_download, local_files_only, token, revision, trust_remote_code, *init_inputs, **kwargs)
2018 # loaded directly from the GGUF file.
2019 if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file:
-> 2020 raise EnvironmentError(
2021 f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
2022 "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
OSError: Can't load tokenizer for 'relaxml/Llama-2-7b-chat-QTIP-2Bit'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'relaxml/Llama-2-7b-chat-QTIP-2Bit' is the correct path to a directory containing all relevant files for a LlamaTokenizerFast tokenizer.
from transformers import AutoTokenizer, AutoModelForCausalLM

# Retry: load the QTIP-quantized Llama-2-7b-chat checkpoint.
model_name = "relaxml/Llama-2-7b-chat-QTIP-2Bit"
# The QTIP repo lacks tokenizer files; borrow the tokenizer from the sibling
# E8P repo, which shares the same Llama-2-7b-chat base vocabulary.
a = "relaxml/Llama-2-7b-chat-E8P-2Bit"
tokenizer = AutoTokenizer.from_pretrained(a)

# FIX for the RAM crash: by default from_pretrained instantiates weights in
# fp32, so a 7B checkpoint can exhaust Colab's system RAM during load.
# torch_dtype="auto" keeps the dtype stored in the checkpoint instead of
# upcasting, substantially lowering peak CPU memory.
# NOTE(review): relaxml QTIP checkpoints may additionally require the
# authors' custom inference code to dequantize/run correctly — verify
# against the model card before relying on outputs.
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto")
Result: the Colab session crashed after exhausting all available system RAM while loading the model (see the runtime logs for details).