
Commit 0c92a5d

Merge pull request #43 from aws/add-ci-integration-tests
added ci integration test
2 parents adb7bd7 + ead52ac commit 0c92a5d

File tree

3 files changed: +59 -24 lines changed


.github/workflows/integ-test.yml

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+name: Run Tests
+
+on:
+  # pull_request:
+  workflow_dispatch:
+
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python 3.6
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.6
+      - name: Install Python dependencies
+        run: pip install -e .[test,dev]
+      - name: Run Integration Tests
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          AWS_DEFAULT_REGION: us-east-1
+        run: make integ-test
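
Note that the pull_request trigger is commented out, so this workflow only runs when dispatched manually. The sketch below shows one way to trigger such a workflow through the GitHub REST API (workflow dispatch endpoint); OWNER/REPO, the branch name, and GITHUB_TOKEN are placeholders for illustration and are not part of this commit.

# Sketch: manually trigger the workflow_dispatch-only integ-test workflow.
# OWNER/REPO, the branch, and GITHUB_TOKEN are placeholders (assumptions).
import os

import requests

url = "https://api.github.com/repos/OWNER/REPO/actions/workflows/integ-test.yml/dispatches"
response = requests.post(
    url,
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
    },
    json={"ref": "main"},  # branch or tag to run the workflow against
)
response.raise_for_status()  # the dispatch endpoint returns 204 No Content on success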

.github/workflows/test.yml renamed to .github/workflows/unit-test.yml

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-name: Run Tests
+name: Run Unit-Tests
 
 on: [pull_request]
 
tests/integ/test_models_from_hub.py

Lines changed: 34 additions & 23 deletions
@@ -1,5 +1,6 @@
 import json
 import os
+import re
 
 import numpy as np
 import pytest
@@ -11,23 +12,34 @@
 from sagemaker.model import Model
 
 
-ROLE_NAME = "sagemaker_execution_role"
-PROFILE = "hf-sm"
-REGION = "us-east-1"
-os.environ["AWS_PROFILE"] = PROFILE  # setting aws profile for our boto3 client
-os.environ["AWS_DEFAULT_REGION"] = REGION  # current DLCs are only in us-east-1 available
+os.environ["AWS_DEFAULT_REGION"] = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
+SAGEMAKER_EXECUTION_ROLE = os.environ.get("SAGEMAKER_EXECUTION_ROLE", "sagemaker_execution_role")
 
-# TODO: Replace with released DLC images
-images = {
-    "pytorch": {
-        "cpu": "558105141721.dkr.ecr.us-east-1.amazonaws.com/huggingface-inference-pytorch:cpu-0.0.1",
-        "gpu": "558105141721.dkr.ecr.us-east-1.amazonaws.com/huggingface-inference-pytorch:gpu-0.0.1",
-    },
-    "tensorflow": {
-        "cpu": "558105141721.dkr.ecr.us-east-1.amazonaws.com/huggingface-inference-tensorflow:cpu-0.0.1",
-        "gpu": "558105141721.dkr.ecr.us-east-1.amazonaws.com/huggingface-inference-tensorflow:gpu-0.0.1",
-    },
-}
+
+def get_framework_ecr_image(registry_id="763104351884", repository_name="huggingface-pytorch-inference", device="cpu"):
+    client = boto3.client("ecr")
+
+    def get_all_ecr_images(registry_id, repository_name, result_key):
+        response = client.list_images(
+            registryId=registry_id,
+            repositoryName=repository_name,
+        )
+        results = response[result_key]
+        while "nextToken" in response:
+            response = client.list_images(
+                registryId=registry_id,
+                nextToken=response["nextToken"],
+                repositoryName=repository_name,
+            )
+            results.extend(response[result_key])
+        return results
+
+    images = get_all_ecr_images(registry_id=registry_id, repository_name=repository_name, result_key="imageIds")
+    image_tags = [image["imageTag"] for image in images]
+    print(image_tags)
+    image_regex = re.compile("\d\.\d\.\d-" + device + "-.{4}$")
+    image = sorted(list(filter(image_regex.match, image_tags)), reverse=True)[0]
+    return image
 
 
 @pytest.mark.parametrize(
@@ -47,19 +59,17 @@
 )
 @pytest.mark.parametrize(
     "framework",
-    [
-        "pytorch",
-    ],
-)  # "tensorflow"])
+    ["pytorch", "tensorflow"],
+)
 @pytest.mark.parametrize(
     "device",
     [
-        # "gpu",
+        "gpu",
         "cpu",
     ],
 )
 def test_deployment_from_hub(task, device, framework):
-    image_uri = images[framework][device]
+    image_uri = get_framework_ecr_image(repository_name=f"huggingface-{framework}-inference", device=device)
     name = f"hf-test-{framework}-{device}-{task}".replace("_", "-")
     model = task2model[task][framework]
     instance_type = "ml.m5.large" if device == "cpu" else "ml.g4dn.xlarge"
@@ -76,7 +86,7 @@ def test_deployment_from_hub(task, device, framework):
         image_uri=image_uri,  # A Docker image URI.
         model_data=None,  # The S3 location of a SageMaker model data .tar.gz
         env=env,  # Environment variables to run with image_uri when hosted in SageMaker (default: None).
-        role=ROLE_NAME,  # An AWS IAM role (either name or full ARN).
+        role=SAGEMAKER_EXECUTION_ROLE,  # An AWS IAM role (either name or full ARN).
         name=name,  # The model name
         sagemaker_session=sagemaker_session,
     )
@@ -127,6 +137,7 @@ def test_deployment_from_hub(task, device, framework):
            "p95_request_time": np.percentile(time_buffer, 95),
            "body": json.loads(response_body),
        }
+        print(data)
        json.dump(data, outfile)
 
    assert task2performance[task][device]["average_request_time"] >= np.mean(time_buffer)
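
For reference, the core of the new get_framework_ecr_image logic is the regex filter plus reverse sort over ECR image tags. The self-contained sketch below reproduces that step against a hypothetical tag list (the tags are illustrative assumptions, not values from ECR or this commit).

# Sketch of the tag-selection step in get_framework_ecr_image, run against a
# hypothetical tag list instead of a live ECR list_images response.
import re

device = "cpu"
image_tags = ["0.0.1-cpu-py36", "0.0.2-cpu-py36", "0.0.2-gpu-py36"]  # hypothetical tags

image_regex = re.compile(r"\d\.\d\.\d-" + device + "-.{4}$")  # matches e.g. "0.0.2-cpu-py36"
image = sorted(filter(image_regex.match, image_tags), reverse=True)[0]
print(image)  # -> "0.0.2-cpu-py36", the newest tag for the requested device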
