# Example console output while the checkpoint shards load:
# Loading checkpoint shards:  33%|███       | 1/3 [00:01<00:03, 1.75s/it]
# Loading checkpoint shards:  67%|███████   | 2/3 [00:03<00:01, 1.72s/it]
# Loading checkpoint shards: 100%|██████████| 3/3 [00:04<00:00, 1.66s/it]

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("m-a-p/ChatMusician-Base")
model = AutoModelForCausalLM.from_pretrained("m-a-p/ChatMusician-Base")
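# A minimal generation sketch; the prompt and generation settings below are
# illustrative assumptions, not part of the original snippet:
inputs = tokenizer("Compose a short melody in ABC notation.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))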
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("text-generation", model="m-a-p/ChatMusician-Base")
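# Example call; the prompt and max_new_tokens are illustrative assumptions:
result = pipe("Write a folk tune in ABC notation:", max_new_tokens=128)
print(result[0]["generated_text"])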
# Load a different model, Genstruct-7B, in 8-bit precision
from transformers import AutoModelForCausalLM, AutoTokenizer
MODEL_NAME = 'NousResearch/Genstruct-7B'
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map='cuda', load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
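# Note: load_in_8bit=True requires the bitsandbytes package, and
# device_map='cuda' places the whole model on a single GPU.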
# Fine-tune on Amazon SageMaker
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'm-a-p/ChatMusician-Base',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here: https://github.com/huggingface/transformers/tree/v4.37.0/examples/pytorch/seq2seq
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.37.0'}
# create the Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_translation.py',
    source_dir='./examples/pytorch/seq2seq',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.37.0',
    pytorch_version='2.1.0',
    py_version='py310',
    hyperparameters=hyperparameters,
)
# start the training job
huggingface_estimator.fit()
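# Optional follow-up: deploy the trained model to a real-time endpoint.
# This is a sketch; the instance type is an illustrative choice, and the
# request payload follows the standard Hugging Face inference format.
predictor = huggingface_estimator.deploy(
    initial_instance_count=1,
    instance_type='ml.g4dn.xlarge',
)
print(predictor.predict({'inputs': 'Compose a short melody in ABC notation.'}))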