-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathguide.txt
More file actions
50 lines (46 loc) · 1.64 KB
/
guide.txt
File metadata and controls
50 lines (46 loc) · 1.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
## Running a HuggingFace model Deepseek-v2-lite with Torch example
# Apply a PVC creation
kubectl apply -f hf-cache-pvc.yaml
# Check for the PV info
kubectl describe pvc huggingface-cache-pvc
# Apply for running Deepseek-v2-lite
kubectl apply -f deepseek-v2-lite-persistent-compatible.yaml
# Check the pod state — select by the job-name label; the pod name
# suffix (e.g. "-hbggb") is randomly generated and differs per run
kubectl describe pod -l job-name=deepseek-v2-lite-test
# Monitor the job initialization
kubectl logs -f job.batch/deepseek-v2-lite-test
# Check the downloaded files within the container
kubectl exec job.batch/deepseek-v2-lite-test -- du -sh /hf-cache/hub
# Or interactively
kubectl exec -it job.batch/deepseek-v2-lite-test -- bash
## Running a NIM inference service example
# Creating keys — note: --from-literal requires a key=value pair,
# not just the value (the key name is what the workload reads, e.g. NGC_API_KEY)
# kubectl create secret generic <name-api-key> --from-literal=NGC_API_KEY=<key string>
# Checking secrets
kubectl get secret
# Apply a PVC creation
kubectl apply -f nim-pvc.yaml
# Start service
kubectl apply -f llama-nemotron-inference-self.yaml
# Expose endpoint
kubectl apply -f service.yaml
# Test inference service with a multi-modal request
# (replace 10.150.45.115 with your own service IP — find it with: kubectl get svc)
curl http://10.150.45.115:8000/v1/chat/completions -H "Content-Type: application/json" -d '{
"model": "nvidia/llama-3.1-nemotron-nano-vl-8b-v1",
"messages": [
{
"role": "user",
"content": [
{"type": "text", "text": "What is in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
}
}
]
}
],
"max_tokens": 200,
"temperature": 0.4
}'