Skip to content

Commit de75f4c

Browse files
committed
chore(tests): refactor integration tests; python managed containers
1 parent 83ef74a commit de75f4c

10 files changed

+169
-127
lines changed

__init__.py

Whitespace-only changes.

run-cache-test.sh

-15
This file was deleted.

run_integration_tests.sh

-90
This file was deleted.

run_integration_tests_on_lambda.sh

+2-1
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,8 @@ commands() {
172172
instance_rsync $INSTANCE_ID . docker-diffusers-api
173173
if [ $? -eq 1 ]; then return 1 ; fi
174174
instance_run_command $INSTANCE_ID "docker build -t gadicc/diffusers-api ." docker-diffusers-api
175-
instance_run_script $INSTANCE_ID run_integration_tests.sh docker-diffusers-api
175+
# instance_run_script $INSTANCE_ID run_integration_tests.sh docker-diffusers-api
176+
instance_run_command $INSTANCE_ID "pytest -s tests/integration" docker-diffusers-api
176177

177178
}
178179
commands

test.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,8 @@ def runTest(name, args, extraCallInputs, extraModelInputs):
182182
result = result["output"]
183183

184184
else:
185-
response = requests.post(TEST_URL, json=inputs)
185+
test_url = args.get("test_url", None) or TEST_URL
186+
response = requests.post(test_url, json=inputs)
186187
try:
187188
result = response.json()
188189
except requests.exceptions.JSONDecodeError as error:

tests/__init__.py

Whitespace-only changes.

tests/integration/__init__.py

Whitespace-only changes.

tests/integration/conftest.py

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
import pytest

tests/integration/lib.py

+156
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,156 @@
1+
import pytest
2+
import docker
3+
import atexit
4+
import time
5+
import boto3
6+
import os
7+
import requests
8+
9+
# Bucket name used by every integration test that exercises the S3/minio path.
AWS_S3_DEFAULT_BUCKET = "test"

# Fallback docker bridge gateway address; overwritten below with the detected
# gateway when a local bridge network is found.
DOCKER_GW_IP = "172.17.0.1"

# Every container started by this module is tracked here so cleanup() can
# stop them all at interpreter exit.
myContainers = list()

dockerClient = docker.DockerClient(
    base_url="unix://var/run/docker.sock", version="auto"
)

# Detect the host-side gateway IP of the first local bridge network, so that
# containers (and the test process) can reach ports published on the host.
for network in dockerClient.networks.list():
    attrs = network.attrs
    if attrs["Scope"] == "local" and attrs["Driver"] == "bridge":
        DOCKER_GW_IP = attrs["IPAM"]["Config"][0]["Gateway"]
        break

i = 0  # NOTE(review): appears unused within this module — confirm before removing
20+
21+
def startContainer(image, command=None, **kwargs):
    """Run `image` detached, register it for cleanup, and wait until running.

    Args:
        image: docker image name/tag to run.
        command: optional command override passed to `containers.run`.
        **kwargs: forwarded verbatim to `dockerClient.containers.run`
            (ports, environment, device_requests, ...).

    Returns:
        The running `Container` object.

    Raises:
        RuntimeError: if the container stops before ever reaching the
            "running" state. Previously this polled forever in that case.
    """
    container = dockerClient.containers.run(
        image,
        command,
        auto_remove=True,
        detach=True,
        **kwargs,
    )
    # Register before waiting so cleanup() can stop it even if the wait fails.
    myContainers.append(container)

    while container.status != "running":
        # Fail fast instead of looping forever when the container dies early.
        # (With auto_remove=True a dead container may also disappear, in which
        # case container.reload() itself raises — either way we stop polling.)
        if container.status in ("exited", "dead"):
            raise RuntimeError(
                f"Container for {image} stopped before running "
                f"(status={container.status})"
            )
        time.sleep(1)
        container.reload()
        print(container.status)
    return container
36+
37+
_minioCache = None


def getMinio():
    """Start (once per session) a minio container and return its handles.

    Returns:
        dict with keys:
            container:    the running minio Container,
            endpoint_url: S3 endpoint URL reachable via the docker gateway,
            s3:           a boto3 S3 client bound to that endpoint.

    Raises:
        RuntimeError: if minio never reports healthy within ~120 seconds
            (previously this polled forever and could hang the test run).

    The result is memoized in `_minioCache`, so repeated calls share a
    single container for the whole test session.
    """
    global _minioCache
    if _minioCache:
        return _minioCache

    container = startContainer(
        "minio/minio",
        "server /data --console-address :9011",
        # container_port -> host_port: API on 9010, console on 9011.
        ports={9000: 9010, 9011: 9011},
    )

    endpoint_url = f"http://{DOCKER_GW_IP}:9010"

    # Poll the liveness endpoint with a bounded retry budget.
    for _ in range(120):
        time.sleep(1)
        response = None
        try:
            print(endpoint_url + "/minio/health/live")
            response = requests.get(endpoint_url + "/minio/health/live", timeout=5)
        except Exception as error:
            print(error)

        if response is not None and response.status_code == 200:
            break
    else:
        raise RuntimeError("minio container never became healthy")

    s3 = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        config=boto3.session.Config(signature_version="s3v4"),
        # Default minio credentials — acceptable for throwaway test containers.
        aws_access_key_id="minioadmin",
        aws_secret_access_key="minioadmin",
        aws_session_token=None,
        # verify=False,
    )

    s3.create_bucket(Bucket=AWS_S3_DEFAULT_BUCKET)

    result = {
        "container": container,
        "endpoint_url": endpoint_url,
        "s3": s3,
    }
    _minioCache = result
    return result
82+
83+
_ddaCache = None


def getDDA(minio=None):
    """Start (once per session) the diffusers-api container; wait until healthy.

    Args:
        minio: optional result of getMinio(); when given, minio credentials
            and endpoint env vars are injected into the container.

    Returns:
        dict with keys: container, minio, url.

    NOTE(review): the result is memoized — the cached instance is returned
    even if a later call passes a different `minio`; callers must be
    consistent across the test session.
    """
    global _ddaCache
    if _ddaCache:
        return _ddaCache

    PORT = 8010

    environment = {
        "HF_AUTH_TOKEN": os.getenv("HF_AUTH_TOKEN"),
        # Optional proxy for model downloads; the CA bundle is only needed
        # when the proxy is in use.
        "http_proxy": os.getenv("DDA_http_proxy"),
        "https_proxy": os.getenv("DDA_https_proxy"),
        "REQUESTS_CA_BUNDLE": os.getenv("DDA_http_proxy")
        and "/usr/local/share/ca-certificates/squid-self-signed.crt",
    }

    if minio:
        environment.update(
            {
                "AWS_ACCESS_KEY_ID": "minioadmin",
                "AWS_SECRET_ACCESS_KEY": "minioadmin",
                "AWS_DEFAULT_REGION": "",
                "AWS_S3_DEFAULT_BUCKET": "test",
                "AWS_S3_ENDPOINT_URL": minio["endpoint_url"],
            }
        )

    container = startContainer(
        "gadicc/diffusers-api",
        ports={8000: PORT},
        device_requests=[
            # Expose all available GPUs to the container.
            docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])
        ],
        environment=environment,
    )

    url = f"http://{DOCKER_GW_IP}:{PORT}/"

    while True:
        time.sleep(1)
        try:
            # print(url + "healthcheck")
            response = requests.get(url + "healthcheck", timeout=5)
        except Exception as error:
            # print(error)
            continue

        if response.status_code == 200:
            result = response.json()
            if result["state"] == "healthy" and result["gpu"] is True:
                print("Ready")
                break
        else:
            # BUG FIX: a requests.Response is falsy for 4xx/5xx statuses, so
            # the original `if response:` truthiness check raised on any error
            # status instead of reaching this log-and-retry branch. Check the
            # status code explicitly and keep polling while the container is
            # still starting up.
            print(response)
            print(response.text)

    data = {
        "container": container,
        "minio": minio,
        "url": url,
    }

    _ddaCache = data
    return data
148+
149+
def cleanup():
    """Stop every container this module started; registered via atexit."""
    print("cleanup")
    for managed in myContainers:
        print("Stopping")
        print(managed)
        managed.stop()


atexit.register(cleanup)

tests_create_cache.py renamed to tests/integration/test_cloud_cache.py

+8-20
Original file line numberDiff line numberDiff line change
@@ -1,38 +1,26 @@
1-
import pytest
2-
import boto3
3-
import os
41
import sys
2+
from .lib import getMinio, getDDA
53
from test import runTest
6-
from botocore.client import Config
7-
8-
AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", None)
9-
AWS_S3_DEFAULT_BUCKET = os.environ.get("AWS_S3_DEFAULT_BUCKET", None)
10-
11-
if AWS_S3_DEFAULT_BUCKET != "test":
12-
sys.stderr.write("Set AWS_S3_DEFAULT_BUCKET=test for tests")
13-
sys.exit(1)
14-
15-
s3 = boto3.resource(
16-
"s3",
17-
endpoint_url=AWS_S3_ENDPOINT_URL,
18-
config=Config(signature_version="s3v4"),
19-
)
20-
bucket = s3.Bucket(AWS_S3_DEFAULT_BUCKET)
21-
224

235
def test_cloud_cache_create_and_upload():
246
"""
257
Check if model exists in cloud cache bucket download otherwise, save
268
with safetensors, and upload model.tar.zst to bucket
279
"""
10+
minio = getMinio()
11+
print(minio)
12+
dda = getDDA(minio=minio)
13+
print(dda)
14+
2815
## bucket.objects.all().delete()
2916
result = runTest(
3017
"txt2img",
31-
{},
18+
{ "test_url": dda["url"] },
3219
{
3320
"MODEL_ID": "stabilityai/stable-diffusion-2-1-base",
3421
# "MODEL_ID": "hf-internal-testing/tiny-stable-diffusion-pipe",
3522
"MODEL_PRECISION": "fp16",
23+
"MODEL_REVISION": "fp16",
3624
"MODEL_URL": "s3://",
3725
},
3826
{"num_inference_steps": 1},

0 commit comments

Comments
 (0)