remove unused import and update readme
- README.md: +3 -0
- src/axolotl/utils/models.py: +0 -1
README.md CHANGED

@@ -171,6 +171,9 @@ base_model_ignore_patterns:
 # if the base_model repo on hf hub doesn't include configuration .json files,
 # you can set that here, or leave this empty to default to base_model
 base_model_config: ./llama-7b-hf
+# Optional tokenizer configuration override in case you want to use a different tokenizer
+# than the one defined in the base model
+tokenizer_config:
 # If you want to specify the type of model to load, AutoModelForCausalLM is a good choice too
 model_type: AutoModelForCausalLM
 # Corresponding tokenizer for the model AutoTokenizer is a good choice
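For context, a minimal sketch of how the new `tokenizer_config` field slots into a config alongside the fields shown in the README snippet above. The `base_model` line and the tokenizer repo used here are illustrative assumptions, not values taken from this commit:

```yaml
# minimal sketch -- tokenizer_config is the only field added by this commit
base_model: ./llama-7b-hf              # assumed to mirror base_model_config from the README example
base_model_config: ./llama-7b-hf
# override the tokenizer when it differs from the one defined in the base model;
# leave empty (or omit) to fall back to the base model's tokenizer
tokenizer_config: huggyllama/llama-7b  # hypothetical tokenizer repo, for illustration only
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
```

Leaving `tokenizer_config` empty presumably keeps the previous behaviour of loading the tokenizer from the base model, so existing configs should be unaffected.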
src/axolotl/utils/models.py CHANGED

@@ -30,7 +30,6 @@ from axolotl.prompt_tokenizers import LLAMA_DEFAULT_PAD_TOKEN
 
 if TYPE_CHECKING:
     from peft import PeftConfig  # noqa: F401
-    from transformers import PreTrainedTokenizer  # noqa: F401
 
     from axolotl.utils.dict import DictDefault  # noqa: F401
 