# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("m-a-p/ChatMusician-Base")
model = AutoModelForCausalLM.from_pretrained("m-a-p/ChatMusician-Base")
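A minimal usage sketch with the objects loaded above; the prompt and generation settings are illustrative assumptions, not taken from the model card:

# Hypothetical usage: tokenize a prompt, generate, and decode
prompt = "Compose a folk melody in ABC notation."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))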

# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="m-a-p/ChatMusician-Base")
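The pipeline returns a list of generated candidates; a quick hedged example (the prompt and max_new_tokens are illustrative):

result = pipe("Write a chord progression in C major:", max_new_tokens=64)
print(result[0]["generated_text"])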

# Load NousResearch/Genstruct-7B in 8-bit (load_in_8bit requires the bitsandbytes package)
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = 'NousResearch/Genstruct-7B'

model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map='cuda', load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
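A hedged generation sketch for the 8-bit model; the prompt here is illustrative only (see the Genstruct model card for its intended prompt format):

inputs = tokenizer("A short document about p-values:", return_tensors='pt').to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))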

# Fine-tune on Amazon SageMaker
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Get the SageMaker execution role; outside a SageMaker notebook, fall back to a named IAM role
try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hyperparameters passed to the training script
hyperparameters = {
    'model_name_or_path': 'm-a-p/ChatMusician-Base',
    'output_dir': '/opt/ml/model'
    # add any remaining training hyperparameters here
}

# Git configuration to pull the fine-tuning script from transformers v4.37.0
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.37.0'}

# Create the Hugging Face estimator. ChatMusician-Base is a causal LM, so use the
# causal language-modeling example script (the old ./examples/pytorch/seq2seq path
# no longer exists in v4.37.0)
huggingface_estimator = HuggingFace(
    entry_point='run_clm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.37.0',
    pytorch_version='2.1.0',
    py_version='py310',
    hyperparameters=hyperparameters
)

# Start the training job
huggingface_estimator.fit()
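Once the job finishes, the same estimator can deploy the fine-tuned model to a real-time endpoint; a hedged sketch (the instance type and payload are assumptions):

# Deploy and query the trained model, then clean up the endpoint
predictor = huggingface_estimator.deploy(initial_instance_count=1, instance_type='ml.g5.xlarge')
print(predictor.predict({'inputs': 'Compose a short melody in ABC notation.'}))
predictor.delete_endpoint()  # avoid ongoing charges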