Bedrock ModelBuilder Example

Bedrock ModelBuilder Example

# Configure AWS credentials and region
#! ada credentials update --provider=isengard --account=<> --role=Admin --profile=default --once
#! aws configure set region us-west-2
# Setup
import boto3
import json
import time
import random
from sagemaker.core.resources import TrainingJob
from sagemaker.serve.bedrock_model_builder import BedrockModelBuilder
# Configuration
# NOTE(review): the '<>' placeholders below (account id in ROLE_ARN) must be
# filled in before running; as written these values will fail at the IAM call.
TRAINING_JOB_NAME = 'meta-textgeneration-llama-3-2-1b-instruct-sft-20251123162832'
ROLE_ARN = "arn:aws:iam::<>:role/Admin"
REGION = 'us-west-2'
BUCKET = 'open-models-testing-pdx'
# Step 1: Get training job and prepare model path
# Fetch the completed SFT job and derive the S3 prefix of its merged weights.
training_job = TrainingJob.get(training_job_name=TRAINING_JOB_NAME)
print(f"Training job status: {training_job.training_job_status}")

# Use the hf_merged directory which has complete HuggingFace format
# NOTE(review): assumes s3_model_artifacts is an 's3://bucket/prefix' URI string
# and that the trainer wrote merged weights under checkpoints/hf_merged/ — confirm.
base_s3_path = training_job.model_artifacts.s3_model_artifacts
hf_model_path = base_s3_path.rstrip('/') + '/checkpoints/hf_merged/'
print(f"Using HF model path: {hf_model_path}")
# Step 2: Verify required files exist
# Bedrock custom-model import needs a complete HuggingFace layout; check each
# expected file with a HEAD request before kicking off the import job.
s3_client = boto3.client('s3', region_name=REGION)

required_files = ['config.json', 'tokenizer.json', 'tokenizer_config.json', 'model.safetensors']
model_prefix = hf_model_path.replace(f's3://{BUCKET}/', '')

print("Checking required files:")
for file_name in required_files:
    try:
        # head_object raises ClientError (404/403) when the key is absent.
        s3_client.head_object(Bucket=BUCKET, Key=model_prefix + file_name)
        print(f"✅ {file_name}")
    except s3_client.exceptions.ClientError:
        # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt
        # and credential/network errors, hiding real failures as "MISSING".
        print(f"❌ {file_name} - MISSING")
# Step 3: Create missing tokenizer files if needed
def ensure_tokenizer_files():
    """Ensure added_tokens.json exists next to the model; create an empty one if missing.

    Reads the module-level ``s3_client``, ``BUCKET`` and ``model_prefix``.
    Llama tokenizers typically define no added tokens, so an empty JSON
    object is a safe placeholder that satisfies loaders expecting the file.
    """
    try:
        s3_client.head_object(Bucket=BUCKET, Key=model_prefix + 'added_tokens.json')
        print("✅ added_tokens.json exists")
    except s3_client.exceptions.ClientError:
        # Narrowed from a bare `except:`: only S3 lookup failures should
        # trigger file creation, not programming errors or KeyboardInterrupt.
        s3_client.put_object(
            Bucket=BUCKET,
            Key=model_prefix + 'added_tokens.json',
            Body=json.dumps({}),
            ContentType='application/json'
        )
        print("✅ Created added_tokens.json")

ensure_tokenizer_files()
# Debug: Check what's actually in the S3 bucket
print("Checking S3 structure...")
# Strip the bucket portion so we can use the remainder as a list/copy prefix.
base_prefix = base_s3_path.replace(f's3://{BUCKET}/', '')
print(f"Base prefix: {base_prefix}")

# List files to see the actual structure
# Delimiter='/' limits Contents to objects directly under base_prefix.
response = s3_client.list_objects_v2(
    Bucket=BUCKET,
    Prefix=base_prefix,
    Delimiter='/'
)

print("Contents:")
if 'Contents' in response:
    for obj in response['Contents'][:10]:  # Show first 10 files
        print(f"  {obj['Key']}")

# Check specifically for hf_merged directory
hf_merged_prefix = base_prefix.rstrip('/') + '/checkpoints/hf_merged/'
print(f"\nChecking hf_merged path: {hf_merged_prefix}")

try:
    # NOTE(review): list_objects_v2 returns at most 1000 keys per call and this
    # does not paginate — fine for a small model directory, verify if it grows.
    response = s3_client.list_objects_v2(Bucket=BUCKET, Prefix=hf_merged_prefix)
    if 'Contents' in response:
        print("Files in hf_merged:")
        for obj in response['Contents']:
            file_name = obj['Key'].replace(hf_merged_prefix, '')
            print(f"  {file_name}")
            
        # Now copy with correct paths
        # Flatten hf_merged/* up into the job's base prefix so the Bedrock
        # import (which reads from the base path) can find the HF files.
        for obj in response['Contents']:
            source_key = obj['Key']
            file_name = source_key.replace(hf_merged_prefix, '')
            dest_key = base_prefix.rstrip('/') + '/' + file_name
            
            try:
                s3_client.copy_object(
                    Bucket=BUCKET,
                    CopySource={'Bucket': BUCKET, 'Key': source_key},
                    Key=dest_key
                )
                print(f"✅ Copied {file_name}")
            except Exception as e:
                # Best-effort: report per-file copy failures and keep going.
                print(f"❌ Failed to copy {file_name}: {e}")
    else:
        print("No files found in hf_merged directory")
except Exception as e:
    print(f"Error: {e}")
# Step 4: Create Bedrock model builder and deploy
# Unique-ish job name: random suffix plus epoch seconds to avoid collisions.
job_name = f"bedrock-import-{random.randint(1000, 9999)}-{int(time.time())}"
print(f"Job name: {job_name}")

# Create builder with correct model path
# NOTE(review): despite the comment above, hf_model_path is never passed here —
# the builder receives only the TrainingJob and presumably derives the artifact
# path itself; confirm it picks up the flattened/hf_merged files.
bedrock_builder = BedrockModelBuilder(
    model=training_job
)

# Deploy to Bedrock
# Kicks off an asynchronous Bedrock model-import job; returns its metadata.
deployment_result = bedrock_builder.deploy(
    job_name=job_name,
    imported_model_name=job_name,
    role_arn=ROLE_ARN
)

job_arn = deployment_result['jobArn']
print(f"Import job started: {job_arn}")
# Step 5: Wait for import to complete
bedrock_client = boto3.client('bedrock', region_name=REGION)

# Poll the import job until a terminal status. A deadline bounds the loop so a
# stuck job can no longer spin this cell forever (previously `while True` with
# no exit besides Completed/Failed/Stopped).
POLL_INTERVAL_SECONDS = 30
MAX_WAIT_SECONDS = 2 * 60 * 60  # 2 hours; imports normally finish well within this
deadline = time.time() + MAX_WAIT_SECONDS

print("Waiting for import to complete...")
while True:
    response = bedrock_client.get_model_import_job(jobIdentifier=job_arn)
    status = response['status']
    print(f"Status: {status}")

    if status == 'Completed':
        # imported_model_arn is read by the inference cell below.
        imported_model_arn = response['importedModelArn']
        print(f"✅ Import completed!")
        print(f"Model ARN: {imported_model_arn}")
        break
    elif status in ['Failed', 'Stopped']:
        print(f"❌ Import failed: {status}")
        if 'failureMessage' in response:
            print(f"Error: {response['failureMessage']}")
        break

    if time.time() >= deadline:
        print(f"❌ Timed out after {MAX_WAIT_SECONDS}s waiting for import job {job_arn}")
        break

    time.sleep(POLL_INTERVAL_SECONDS)
# Step 6: Test inference with correct format
# Imported models accept different request schemas; probe the two likely ones
# in order (OpenAI-style ChatCompletion, then Meta/Llama prompt completion)
# and stop at the first that succeeds.
if 'imported_model_arn' in locals():
    bedrock_runtime = boto3.client('bedrock-runtime', region_name=REGION)

    request_formats = [
        (
            "ChatCompletion",
            "ChatCompletion format",
            {
                "messages": [
                    {"role": "user", "content": "What is the capital of France?"}
                ],
                "max_tokens": 100,
                "temperature": 0.7
            },
        ),
        (
            "BedrockMeta",
            "BedrockMeta format",
            {
                "prompt": "What is the capital of France?",
                "max_gen_len": 100,
                "temperature": 0.7,
                "top_p": 0.9
            },
        ),
    ]

    for fail_label, success_label, payload in request_formats:
        try:
            raw = bedrock_runtime.invoke_model(
                modelId=imported_model_arn,
                body=json.dumps(payload)
            )
            parsed = json.loads(raw['body'].read().decode())
            print(f"\n🎉 Inference successful ({success_label})!")
            print(f"Response: {parsed}")
            break
        except Exception as invoke_error:
            print(f"{fail_label} failed: {invoke_error}")
    else:
        # Both attempts raised.
        print("❌ Both formats failed. Check model documentation for correct format.")
else:
    print("❌ Import failed, cannot test inference")
# Optional: List all imported models
# Enumerate every custom model imported into Bedrock in this region.
listing = bedrock_client.list_imported_models()
print("\nAll imported models:")
for summary in listing['modelSummaries']:
    print(f"- {summary['modelName']}: {summary['modelArn']}")
# --- Second, standalone example (originally a separate notebook cell) ---
from pprint import pprint
from sagemaker.core.resources import TrainingJob
from sagemaker.serve.bedrock_model_builder import BedrockModelBuilder

# Fetch a training job from us-east-1 (note: the example above uses us-west-2).
training_job = TrainingJob.get(training_job_name="kssharda-sft-lora-lite-2-ui-run-2bn3c-<>8",
                               region="us-east-1")
pprint(training_job.model_artifacts.s3_model_artifacts)
bedrock_model_builder = BedrockModelBuilder(
    model = training_job
)

# NOTE(review): this deploy() uses custom_model_name while the earlier example
# used imported_model_name — confirm which keyword the current API expects.
bedrock_model_builder.deploy(job_name = "nargokul-26-01",
                             custom_model_name = "nargokul-26-01",
                             role_arn="arn:aws:iam::<>:role/Admin")
# Example: fetch a registered dataset by its hub-content ARN and dump its fields.
from sagemaker.ai_registry.dataset import DataSet

dataset = DataSet.get(name="arn:aws:sagemaker:us-east-1:<>:hub-content/MDG6N5CA58D0IJMC1OPJOPIKOS2VPPLP0AM6UBOT9D73B8A34HTG/DataSet/nova-2-0-sft-dataset/1.0.0")

# Inspect all attributes of the DataSet object (pprint imported in the cell above).
pprint(dataset.__dict__)