-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy paths3_functional_test.sh
More file actions
executable file
·1406 lines (1275 loc) · 61.8 KB
/
s3_functional_test.sh
File metadata and controls
executable file
·1406 lines (1275 loc) · 61.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
# S3 Functional Test Script
# Tests bucket operations, object put/get with various sizes, and range requests

# Don't exit on error - we want to count all failures
set +e

# All build variants produce the same debug binary path, so set it once
# instead of duplicating the assignment in both branches below.
BINARY="./target/debug/hsc"

# When HSC_RDMA is set to a non-false value, build and use the rdma-featured binary.
if [[ -n "$HSC_RDMA" && "$HSC_RDMA" != "false" && "$HSC_RDMA" != "0" ]]; then
  if [[ "$HSC_RDMA" == "cuobj" || "$HSC_RDMA" == "auto" ]]; then
    echo "HSC_RDMA=$HSC_RDMA detected — building with --features cuobj ..."
    cargo build --features cuobj 2>&1 | tail -3
    # Build libs3_rdma_cuobj.so and copy it next to the hsc binary so that
    # the runtime loader finds it via the "exe dir" search path.
    _CUOBJ_SRC="${CUOBJ_SRC:-$(cd "$(dirname "$0")/../.." && pwd)/s3-rdma/providers/cuobj}"
    if [[ -d "$_CUOBJ_SRC" ]]; then
      echo "Building libs3_rdma_cuobj.so from $_CUOBJ_SRC ..."
      CUOBJ_ROOT_DIR="${CUOBJ_ROOT_DIR:-/usr/local/cuda}" \
        cargo build --manifest-path "$_CUOBJ_SRC/Cargo.toml" \
        --target-dir "./target/cuobj-build" 2>&1 | tail -3
      _SO="./target/cuobj-build/debug/libs3_rdma_cuobj.so"
      if [[ -f "$_SO" ]]; then
        cp "$_SO" "./target/debug/"
        echo "Copied libs3_rdma_cuobj.so → ./target/debug/"
      else
        echo "Warning: libs3_rdma_cuobj.so not produced — RDMA may fall back to standard I/O"
      fi
    else
      echo "Warning: cuobj source not found at $_CUOBJ_SRC — skipping libs3_rdma_cuobj.so build"
    fi
  else
    echo "HSC_RDMA=$HSC_RDMA detected — building with --features rdma ..."
    cargo build --features rdma 2>&1 | tail -3
  fi
fi
# SSE configuration — set HSC_SSE to enable server-side encryption for all tests.
#
# AES256  — S3-managed AES-256 (transparent to reads; all tests work normally)
# aws:kms — AWS KMS encryption (set HSC_SSE_KMS_KEY_ID for a specific key)
# sse-c   — Customer-provided AES-256 key (set HSC_SSE_C_KEY or auto-generated;
#           'hsc cmp --range' does not yet accept SSE-C keys so range-verification
#           steps are skipped in this mode)
#
# Examples:
#   HSC_SSE=AES256 ./examples/s3_functional_test.sh
#   HSC_SSE=aws:kms HSC_SSE_KMS_KEY_ID=arn:aws:kms:us-east-1:123:key/abc ./examples/s3_functional_test.sh
#   HSC_SSE=sse-c ./examples/s3_functional_test.sh   # auto-generates a key
#   HSC_SSE=sse-c HSC_SSE_C_KEY=<base64-32-bytes> ./examples/s3_functional_test.sh
HSC_SSE_KMS_KEY_ID="${HSC_SSE_KMS_KEY_ID:-}"
HSC_SSE_C_KEY="${HSC_SSE_C_KEY:-}"
SSE_UPLOAD_ARGS="" # injected into every local→S3 cp command
SSE_DOWNLOAD_ARGS="" # injected into every S3→local cp command (non-empty for sse-c only)
SSE_COPY_ARGS="" # injected into every S3→S3 cp command
if [[ -n "$HSC_SSE" ]]; then
  # Match the requested mode case-insensitively.
  _sse_mode="${HSC_SSE,,}"
  case "$_sse_mode" in
    aes256)
      SSE_UPLOAD_ARGS="--sse AES256"
      SSE_COPY_ARGS="--sse AES256"
      ;;
    aws:kms)
      SSE_UPLOAD_ARGS="--sse aws:kms"
      SSE_COPY_ARGS="--sse aws:kms"
      # Only name an explicit KMS key when the caller supplied one.
      if [[ -n "$HSC_SSE_KMS_KEY_ID" ]]; then
        SSE_UPLOAD_ARGS="$SSE_UPLOAD_ARGS --sse-kms-key-id $HSC_SSE_KMS_KEY_ID"
        SSE_COPY_ARGS="$SSE_COPY_ARGS --sse-kms-key-id $HSC_SSE_KMS_KEY_ID"
      fi
      ;;
    sse-c)
      # Generate a random 32-byte key when none was provided.
      if [[ -z "$HSC_SSE_C_KEY" ]]; then
        HSC_SSE_C_KEY=$(openssl rand -base64 32)
        echo "HSC_SSE_C_KEY not set — auto-generated: $HSC_SSE_C_KEY"
      fi
      SSE_UPLOAD_ARGS="--sse-c AES256 --sse-c-key $HSC_SSE_C_KEY"
      SSE_DOWNLOAD_ARGS="--sse-c AES256 --sse-c-key $HSC_SSE_C_KEY"
      SSE_COPY_ARGS="--sse-c AES256 --sse-c-key $HSC_SSE_C_KEY --sse-c-copy-source AES256 --sse-c-copy-source-key $HSC_SSE_C_KEY"
      ;;
    *)
      echo "Warning: Unknown HSC_SSE='$HSC_SSE'. Valid values: AES256, aws:kms, sse-c"
      ;;
  esac
fi
# Bucket name: first positional argument, or auto-generated.
# When a bucket is supplied the create (Step 1) and delete (Step 10) steps are skipped,
# allowing the script to be run against a pre-existing bucket.
#
# Usage:
# ./examples/s3_functional_test.sh # create+delete a temp bucket
# ./examples/s3_functional_test.sh my-bucket # use existing bucket, skip mb/rb
ENDPOINT="${AWS_ENDPOINT_URL}"
# Auto-generated names embed a timestamp so concurrent runs don't collide.
BUCKET_NAME="${1:-test-bucket-$(date +%s)}"
BUCKET_PROVIDED="${1:+true}" # non-empty when caller supplied a bucket name
TEST_DIR="./test_data"
# Scratch dir for per-job output files from parallel subshells; removed on exit.
RESULTS_DIR=$(mktemp -d)
trap 'rm -rf "$RESULTS_DIR"' EXIT
# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Counters
SUCCESS_COUNT=0
ERROR_COUNT=0
FAILED_TESTS=() # accumulates every failure message for the end-of-run summary
FAILED_CMDS=() # parallel array: rerun command for each failure
# Object sizes to test
SIZES=("1b" "2b" "5b" "10b" "1k" "8k" "64k" "512k" "1m" "8m" "16m" "24m" "32m" "64m")
echo "========================================="
echo "S3 Functional Test"
echo "Endpoint: $ENDPOINT"
echo "Bucket: $BUCKET_NAME"
echo "SSE Mode: ${HSC_SSE:-none}"
echo "========================================="
# Create test data directory
mkdir -p "$TEST_DIR"
# Print a green check-marked message and bump the global pass counter.
# $1 - message text (may contain backslash escapes, expanded like `echo -e`)
success() {
  printf '%b\n' "${GREEN}✓ $1${NC}"
  ((SUCCESS_COUNT++))
}
# Print a red failure message, count it, and record it for the end-of-run summary.
# $1 - failure description
# $2 - optional command string that reproduces the failure (stored alongside)
error() {
  printf '%b\n' "${RED}✗ $1${NC}"
  ((ERROR_COUNT++))
  FAILED_TESTS+=("$1")
  FAILED_CMDS+=("${2:-}")
}
# Print a yellow arrow-prefixed informational/progress message.
info() {
  printf '%b\n' "${YELLOW}→ $1${NC}"
}
# Timing helpers: SECONDS-based wall-clock deltas per step.
SCRIPT_START=$SECONDS
STEP_START=$SECONDS

# Print elapsed time since STEP_START (as "Nm NNs" or "Ns"), then reset
# STEP_START so the next call measures the following step.
step_time() {
  local total=$((SECONDS - STEP_START))
  local m=$((total / 60))
  local s=$((total % 60))
  if (( m > 0 )); then
    printf "${YELLOW} ⏱ %dm %02ds${NC}\n" "$m" "$s"
  else
    printf '%b\n' "${YELLOW} ⏱ ${s}s${NC}"
  fi
  STEP_START=$SECONDS
}
# Collect PASS:/FAIL:/INFO:/RERUN: lines written by parallel subshells to
# per-job files under $RESULTS_DIR. A FAIL: line may be immediately followed
# by a RERUN: line carrying the reproduce command; it is attached to the most
# recently recorded failure (same index in the FAILED_CMDS parallel array).
# Unprefixed lines are passed through verbatim. Job files are removed after
# processing so the next parallel phase starts clean.
collect_results() {
  local f line n
  for f in "$RESULTS_DIR"/job_*; do
    [ -f "$f" ] || continue
    while IFS= read -r line; do
      case "$line" in
        PASS:*) success "${line#PASS:}" ;;
        FAIL:*) error "${line#FAIL:}" ;;
        RERUN:*)
          # Attach to the last recorded failure, if any.
          n=${#FAILED_TESTS[@]}
          if (( n > 0 )); then
            FAILED_CMDS[n-1]="${line#RERUN:}"
          fi
          ;;
        INFO:*) info "${line#INFO:}" ;;
        *) echo "$line" ;;
      esac
    done < "$f"
  done
  rm -f "$RESULTS_DIR"/job_*
}
# SSE-aware full-file comparison (local file vs S3 object).
# $1 - local file path, $2 - s3:// URI. Returns the cmp exit status;
# stderr is suppressed because callers only care about pass/fail.
_hsc_cmp() {
  # shellcheck disable=SC2086
  $BINARY cmp $SSE_DOWNLOAD_ARGS "$1" "$2" 2>/dev/null
}
# Create a sparse (zero-filled) test file named $TEST_DIR/testfile_<size>.dat.
# Arguments:
#   $1 - size spec: <N>b = N bytes, <N>k = N KiB, <N>m = N MiB. Any other
#        value is passed to `truncate -s` unchanged (bare byte counts and
#        truncate-native suffixes), so unrecognized specs no longer fall
#        through the case silently and then falsely report success.
# Outputs: progress via info()/success().
create_test_file() {
  local size=$1
  local filename="$TEST_DIR/testfile_${size}.dat"
  info "Creating test file: $filename (size: $size)"
  case $size in
    *b) truncate -s "${size%b}" "$filename" ;;
    *k) truncate -s "$((${size%k} * 1024))" "$filename" ;;
    *m) truncate -s "$((${size%m} * 1048576))" "$filename" ;;
    *) truncate -s "$size" "$filename" ;;
  esac
  success "Created $filename ($(du -h "$filename" | cut -f1))"
}
# Step 1: Create bucket
# NOTE(review): the usage header says mb/rb are skipped when a bucket name is
# supplied ($BUCKET_PROVIDED), but this step always runs mb; --ignore-existing
# makes it harmless for pre-existing buckets — confirm intent.
echo ""
info "Step 1: Creating bucket '$BUCKET_NAME'..."
if $BINARY mb --ignore-existing "s3://$BUCKET_NAME"; then
success "Bucket ready: $BUCKET_NAME"
else
# Bucket creation is a hard prerequisite — abort the whole run.
error "Failed to create bucket $BUCKET_NAME"
exit 1
fi
# Delete any leftover objects from a previous failed run
# grep -c counts listing lines that start with a digit (object rows); the
# `|| true` keeps the pipeline from aborting when nothing matches.
_existing=$($BINARY ls "s3://$BUCKET_NAME" 2>/dev/null | grep -c "^[0-9]" || true)
if [ "$_existing" -gt 0 ]; then
info "Bucket contains $_existing leftover object(s) — cleaning up before test..."
if $BINARY rm --recursive "s3://$BUCKET_NAME/" >/dev/null 2>&1; then
success "Bucket cleared"
else
error "Failed to clear bucket — aborting"
exit 1
fi
fi
# Step 2: Create test files and upload objects
step_time
echo ""
info "Step 2: Creating test files and uploading objects..."
# Create all test files in parallel (setup)
# truncate makes sparse zero-filled files, so even 64m is instant.
for size in "${SIZES[@]}"; do
(
filename="$TEST_DIR/testfile_${size}.dat"
case $size in
*b) truncate -s ${size%b} "$filename" ;;
*k) truncate -s $((${size%k} * 1024)) "$filename" ;;
*m) truncate -s $((${size%m} * 1048576)) "$filename" ;;
esac
) &
done
wait
info "All test files created"
# Upload all objects at once
# Excludes keep later steps' scratch subdirectories out of this bulk sync.
info "Uploading test files to S3..."
# shellcheck disable=SC2086
if $BINARY sync $SSE_UPLOAD_ARGS "$TEST_DIR/" "s3://$BUCKET_NAME/" --exclude "*/multipart/*" --exclude "*/chunk_boundary/*" --exclude "*/chunk_downloads/*" --exclude "*/ec/*" --exclude "*/ec_dl/*" --exclude "*/sync_test/*" 2>/dev/null; then
success "Uploaded ${#SIZES[@]} test files via sync"
else
error "Failed to upload test files"
exit 1
fi
# List objects to verify
echo ""
info "Listing objects in bucket..."
$BINARY ls "s3://$BUCKET_NAME"
# Step 2b: Test Multipart Upload (via $BINARY cp for large files)
# One background subshell per part size; each writes PASS:/FAIL:/RERUN:/INFO:
# lines to its own $RESULTS_DIR/job_N file, gathered by collect_results.
step_time
echo ""
info "Step 2b: Testing Multipart Upload (via $BINARY cp)..."
MULTIPART_SIZES=("1m" "16m" "32m")
mkdir -p "$TEST_DIR/multipart"
_JOB=0
for part_size in "${MULTIPART_SIZES[@]}"; do
(
multipart_file="$TEST_DIR/multipart/multipart_${part_size}_parts.dat"
object_key="multipart_${part_size}_parts.dat"
part1="$TEST_DIR/multipart/part1_${part_size}.dat"
part2="$TEST_DIR/multipart/part2_${part_size}.dat"
part3="$TEST_DIR/multipart/part3_${part_size}.dat"
echo "INFO:Creating multipart test file with ${part_size} parts..."
case $part_size in
1m) count=1 ;;
16m) count=16 ;;
32m) count=32 ;;
esac
# Three equal sparse parts, created concurrently.
truncate -s $((count * 1048576)) "$part1" &
truncate -s $((count * 1048576)) "$part2" &
truncate -s $((count * 1048576)) "$part3" &
wait
# Combine parts into one file
cat "$part1" "$part2" "$part3" > "$multipart_file"
echo "INFO:Uploading $object_key via $BINARY cp (multipart for large files)..."
if $BINARY cp $SSE_UPLOAD_ARGS "$multipart_file" "s3://$BUCKET_NAME/$object_key" >/dev/null 2>&1; then
echo "PASS:Uploaded $object_key"
# Verify full-object integrity using hsc cmp
echo "INFO:Verifying multipart upload integrity for $object_key..."
if _hsc_cmp "$multipart_file" "s3://$BUCKET_NAME/$object_key"; then
echo "PASS:Multipart upload integrity verified for $object_key"
else
echo "FAIL:Multipart upload integrity check failed for $object_key"
echo "RERUN:\$BINARY cp $SSE_UPLOAD_ARGS $multipart_file s3://\$BUCKET_NAME/$object_key && \$BINARY cmp $SSE_DOWNLOAD_ARGS $multipart_file s3://\$BUCKET_NAME/$object_key"
fi
else
echo "FAIL:Failed to upload $object_key"
echo "RERUN:\$BINARY cp $SSE_UPLOAD_ARGS $multipart_file s3://\$BUCKET_NAME/$object_key"
fi
# Clean up part files
rm -f "$part1" "$part2" "$part3"
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
wait
collect_results
echo ""
info "Listing all objects (including multipart uploads)..."
$BINARY ls "s3://$BUCKET_NAME"
# Step 3: Download objects (full size) with integrity verification
# One background subshell per size; `exit 0` inside a subshell only ends that
# job early (after a FAIL), never the whole script.
step_time
echo ""
info "Step 3: Downloading objects (full size) and verifying data integrity..."
mkdir -p "$TEST_DIR/downloads"
_JOB=0
for size in "${SIZES[@]}"; do
(
object_key="testfile_${size}.dat"
download_file="$TEST_DIR/downloads/testfile_${size}.dat"
original_file="$TEST_DIR/testfile_${size}.dat"
echo "INFO:Downloading $object_key..."
if ! $BINARY cp $SSE_DOWNLOAD_ARGS "s3://$BUCKET_NAME/$object_key" "$download_file" >/dev/null 2>&1; then
echo "FAIL:Failed to download $object_key"
echo "RERUN:\$BINARY cp $SSE_DOWNLOAD_ARGS s3://\$BUCKET_NAME/$object_key /tmp/${object_key}_dl"
exit 0
fi
# NOTE(review): stat -c%s is GNU coreutils (Linux); would need stat -f%z on BSD/macOS.
original_size=$(stat -c%s "$original_file")
download_size=$(stat -c%s "$download_file")
if [ "$original_size" -ne "$download_size" ]; then
echo "FAIL:Size mismatch for $object_key (expected: $original_size, got: $download_size)"
echo "RERUN:\$BINARY cp $SSE_DOWNLOAD_ARGS s3://\$BUCKET_NAME/$object_key /tmp/${object_key}_dl && stat -c%s /tmp/${object_key}_dl"
exit 0
fi
# Both operands are local files here, so no SSE args are needed for cmp.
if $BINARY cmp "$original_file" "$download_file" 2>/dev/null; then
echo "PASS:Downloaded and verified $object_key (size: $download_size bytes, content: identical)"
else
echo "FAIL:Data integrity check failed for $object_key"
echo "RERUN:\$BINARY cmp $SSE_DOWNLOAD_ARGS \$TEST_DIR/$object_key s3://\$BUCKET_NAME/$object_key"
exit 0
fi
# Single stat call retrieves ETag, Content-Length, and SHA-256 checksum together.
stat_output=$($BINARY stat "s3://$BUCKET_NAME/$object_key" 2>/dev/null)
response_etag=$(echo "$stat_output" | grep "^ETag" | sed 's/ETag *: //' | tr -d '"')
response_content_length=$(echo "$stat_output" | grep "^Size" | sed 's/Size *: //; s/ bytes.*//')
response_checksum=$(echo "$stat_output" | grep "^SHA256" | awk '{print $3}')
# Check ETag header — S3 Express One Zone uses random/opaque ETags by design;
# any non-empty ETag is valid (multipart ETags contain "-").
if [ -n "$response_etag" ]; then
if [[ "$response_etag" == *"-"* ]]; then
echo "PASS:Response ETag (multipart): $response_etag"
else
echo "PASS:Response ETag present: $response_etag"
fi
else
echo "FAIL:Response ETag not found for $object_key"
echo "RERUN:\$BINARY stat s3://\$BUCKET_NAME/$object_key"
fi
# Verify SHA-256 checksum if the server returned one (requires upload with --checksum SHA256).
# Absence is not a failure — most uploads omit it and integrity is already covered by hsc cmp above.
if [ -n "$response_checksum" ]; then
expected_checksum=$(openssl dgst -sha256 -binary "$original_file" | base64)
if [ "$response_checksum" = "$expected_checksum" ]; then
echo "PASS:SHA-256 checksum verified: $response_checksum"
else
echo "FAIL:SHA-256 checksum mismatch (expected: $expected_checksum, got: $response_checksum)"
echo "RERUN:\$BINARY stat s3://\$BUCKET_NAME/$object_key"
fi
else
echo "INFO:SHA-256 checksum not returned by server for $object_key (skipped)"
fi
# Check Content-Length header
if [ -n "$response_content_length" ]; then
if [ "$response_content_length" -eq "$original_size" ]; then
echo "PASS:Response Content-Length correct: $response_content_length"
else
echo "FAIL:Response Content-Length mismatch (expected: $original_size, got: $response_content_length)"
echo "RERUN:\$BINARY stat s3://\$BUCKET_NAME/$object_key"
fi
else
echo "FAIL:Response Content-Length not found for $object_key"
echo "RERUN:\$BINARY stat s3://\$BUCKET_NAME/$object_key"
fi
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
wait
collect_results
# Step 4: Test range requests with integrity verification using hsc cmp
step_time
echo ""
info "Step 4: Testing range requests and verifying data integrity with 'hsc cmp'..."
# check_range <ok_msg> <fail_msg> <original_file> <range_spec> <s3_uri>
# Emits PASS:/FAIL:/RERUN: lines on stdout so callers can run it in a
# background subshell and gather the output via collect_results().
check_range() {
  local pass_msg=$1 fail_msg=$2 local_file=$3 byte_range=$4 uri=$5
  # shellcheck disable=SC2086
  if ! $BINARY cmp $SSE_DOWNLOAD_ARGS --range "$byte_range" "$local_file" "$uri" 2>/dev/null; then
    echo "FAIL:$fail_msg"
    echo "RERUN:\$BINARY cmp $SSE_DOWNLOAD_ARGS --range \"$byte_range\" $local_file $uri"
    return
  fi
  echo "PASS:$pass_msg"
}
# verify_range <original_file> <range_spec> <s3_uri>
# Boolean wrapper kept for backward compatibility: returns 0 when the given
# byte range of the S3 object matches the local file, 1 otherwise.
verify_range() {
  local src=$1 spec=$2 uri=$3
  # shellcheck disable=SC2086
  $BINARY cmp $SSE_DOWNLOAD_ARGS --range "$spec" "$src" "$uri" 2>/dev/null && return 0
  return 1
}
# Test different ranges on 1m file
# All range checks below run as parallel background jobs writing to
# $RESULTS_DIR/job_N; the final wait + collect_results gathers them.
test_ranges=("bytes=0-1023" "bytes=1024-2047" "bytes=0-511" "bytes=512000-1048575")
_JOB=0
for range in "${test_ranges[@]}"; do
original_file="$TEST_DIR/testfile_1m.dat"
(
echo "INFO:Verifying testfile_1m.dat range: $range..."
check_range "Range verified: $range" "Range integrity failed: $range" \
"$original_file" "$range" "s3://$BUCKET_NAME/testfile_1m.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
done
(
echo "INFO:Testing range on large file (64m)..."
check_range "Range on 64m file verified: bytes=0-1048575 (1MB)" \
"Range on 64m file integrity failed: bytes=0-1048575" \
"$TEST_DIR/testfile_64m.dat" "bytes=0-1048575" "s3://$BUCKET_NAME/testfile_64m.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
echo "INFO:Testing middle range on 8m file..."
check_range "Middle range on 8m file verified: bytes=4194304-5242879" \
"Middle range on 8m file integrity failed: bytes=4194304-5242879" \
"$TEST_DIR/testfile_8m.dat" "bytes=4194304-5242879" "s3://$BUCKET_NAME/testfile_8m.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
echo "INFO:Testing last 1KB of 32m file..."
check_range "Last 1KB of 32m file verified: bytes=33553408-33554431" \
"Last 1KB of 32m file integrity failed: bytes=33553408-33554431" \
"$TEST_DIR/testfile_32m.dat" "bytes=33553408-33554431" "s3://$BUCKET_NAME/testfile_32m.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
echo ""
# Ranges against multipart-uploaded objects, including ranges that straddle
# part boundaries (the critical cases for server-side part stitching).
info "Testing range requests on multipart uploaded objects..."
info "Testing ranges on multipart object with 1m parts (3MB total)..."
(
check_range "Multipart 1m: First half of part 1 verified" \
"Multipart 1m: First half of part 1 integrity failed" \
"$TEST_DIR/multipart/multipart_1m_parts.dat" "bytes=0-524287" \
"s3://$BUCKET_NAME/multipart_1m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
echo "INFO: CRITICAL: Range across part 1->2 boundary"
check_range "Multipart 1m: Range across part boundary (part 1->2) verified" \
"Multipart 1m: Range across part boundary integrity failed" \
"$TEST_DIR/multipart/multipart_1m_parts.dat" "bytes=1048000-1049599" \
"s3://$BUCKET_NAME/multipart_1m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
check_range "Multipart 1m: Middle of part 2 verified" \
"Multipart 1m: Middle of part 2 integrity failed" \
"$TEST_DIR/multipart/multipart_1m_parts.dat" "bytes=1572864-2097151" \
"s3://$BUCKET_NAME/multipart_1m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
info "Testing ranges on multipart object with 16m parts (48MB total)..."
(
check_range "Multipart 16m: First 8MB of part 1 verified" \
"Multipart 16m: First 8MB integrity failed" \
"$TEST_DIR/multipart/multipart_16m_parts.dat" "bytes=0-8388607" \
"s3://$BUCKET_NAME/multipart_16m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
echo "INFO: CRITICAL: Range across 16MB part boundary"
check_range "Multipart 16m: Range across part boundary (16MB boundary) verified" \
"Multipart 16m: Range across part boundary integrity failed" \
"$TEST_DIR/multipart/multipart_16m_parts.dat" "bytes=16776192-16778239" \
"s3://$BUCKET_NAME/multipart_16m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
check_range "Multipart 16m: Range in part 3 verified" \
"Multipart 16m: Range in part 3 integrity failed" \
"$TEST_DIR/multipart/multipart_16m_parts.dat" "bytes=40000000-41000000" \
"s3://$BUCKET_NAME/multipart_16m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
info "Testing ranges on multipart object with 32m parts (96MB total)..."
(
check_range "Multipart 32m: End of part 1 verified" \
"Multipart 32m: End of part 1 integrity failed" \
"$TEST_DIR/multipart/multipart_32m_parts.dat" "bytes=33554000-33554431" \
"s3://$BUCKET_NAME/multipart_32m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
echo "INFO: CRITICAL: Range across part 2->3 boundary (64MB mark)"
check_range "Multipart 32m: Range across part 2->3 boundary (64MB) verified" \
"Multipart 32m: Range across part 2->3 boundary integrity failed" \
"$TEST_DIR/multipart/multipart_32m_parts.dat" "bytes=67108000-67109000" \
"s3://$BUCKET_NAME/multipart_32m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
(
echo "INFO: CRITICAL: Large range spanning all 3 parts (80MB)"
check_range "Multipart 32m: Large range spanning all parts verified (80MB)" \
"Multipart 32m: Large range spanning all parts integrity failed" \
"$TEST_DIR/multipart/multipart_32m_parts.dat" "bytes=10000000-90000000" \
"s3://$BUCKET_NAME/multipart_32m_parts.dat"
) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
wait
collect_results
# Step 5: Chunk boundary tests — putObject / getObject
step_time
echo ""
CHUNK_SIZE=${CHUNK_SIZE:-4194304} # 4 MB default; override with: CHUNK_SIZE=<bytes>
# Shorthand for 1x, 2x, and 3x the chunk size, used throughout steps 5-6.
C=$CHUNK_SIZE
C2=$((CHUNK_SIZE * 2))
C3=$((CHUNK_SIZE * 3))
info "Step 5: Chunk boundary putObject/getObject (CHUNK_SIZE=${CHUNK_SIZE} bytes)..."
# Test object sizes: ±1 byte around each of the 1-, 2-, and 3-chunk boundaries
# CB_LABELS and CB_BYTES are parallel arrays (same index = same test file).
CB_LABELS=("chunk1_minus1" "chunk1_exact" "chunk1_plus1"
"chunk2_minus1" "chunk2_exact" "chunk2_plus1"
"chunk3_exact")
CB_BYTES=($((C-1)) $C $((C+1)) $((C2-1)) $C2 $((C2+1)) $C3)
mkdir -p "$TEST_DIR/chunk_boundary" "$TEST_DIR/chunk_downloads"
# Create all chunk-boundary files instantly with truncate (sparse, zero-filled)
for i in "${!CB_LABELS[@]}"; do
truncate -s "${CB_BYTES[$i]}" "$TEST_DIR/chunk_boundary/cb_${CB_LABELS[$i]}.dat"
done
info "Chunk-boundary test files created (${#CB_LABELS[@]} files)"
# Step 5a: putObject — upload all chunk-boundary files at once via sync
echo ""
info "Step 5a: putObject — uploading chunk-boundary files..."
# shellcheck disable=SC2086
if $BINARY sync $SSE_UPLOAD_ARGS "$TEST_DIR/chunk_boundary/" "s3://$BUCKET_NAME/" 2>/dev/null; then
success "putObject: uploaded ${#CB_LABELS[@]} chunk-boundary files"
else
error "putObject: sync failed for chunk-boundary files"
fi
# Step 5b: getObject — download and byte-verify every chunk-boundary file in parallel
echo ""
info "Step 5b: getObject — downloading and verifying chunk-boundary files..."
_JOB=0
for i in "${!CB_LABELS[@]}"; do
label=${CB_LABELS[$i]}; size=${CB_BYTES[$i]}
(
orig="$TEST_DIR/chunk_boundary/cb_${label}.dat"
dl="$TEST_DIR/chunk_downloads/cb_${label}.dat"
echo "INFO:getObject cb_${label}..."
if ! $BINARY cp $SSE_DOWNLOAD_ARGS "s3://$BUCKET_NAME/cb_${label}.dat" "$dl" >/dev/null 2>&1; then
echo "FAIL:getObject failed for cb_${label}"
echo "RERUN:\$BINARY cp $SSE_DOWNLOAD_ARGS s3://\$BUCKET_NAME/cb_${label}.dat \$TEST_DIR/chunk_downloads/cb_${label}.dat"
exit 0
fi
actual=$(stat -c%s "$dl")
if [ "$actual" -ne "$size" ]; then
echo "FAIL:getObject size mismatch cb_${label}: expected ${size}, got ${actual}"
echo "RERUN:\$BINARY cp $SSE_DOWNLOAD_ARGS s3://\$BUCKET_NAME/cb_${label}.dat \$TEST_DIR/chunk_downloads/cb_${label}.dat && stat -c%s \$TEST_DIR/chunk_downloads/cb_${label}.dat"
exit 0
fi
# Local-vs-local comparison, so cmp needs no SSE arguments here.
if $BINARY cmp "$orig" "$dl" 2>/dev/null; then
echo "PASS:getObject cb_${label} (${size} bytes, content identical)"
else
echo "FAIL:getObject data integrity failed for cb_${label}"
echo "RERUN:\$BINARY cmp $SSE_DOWNLOAD_ARGS \$TEST_DIR/chunk_boundary/cb_${label}.dat s3://\$BUCKET_NAME/cb_${label}.dat"
fi
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
wait
collect_results
# Step 5c: getObjectRange — ±4-byte ranges targeting every chunk boundary
step_time
echo ""
info "Step 5c: getObjectRange — ±4 bytes at every chunk boundary..."
# _cmp_range <label> <byte_range>
# Compare one byte range of chunk-boundary object cb_<label>.dat against the
# matching local file; emits PASS:/FAIL:/RERUN: lines for collect_results().
_cmp_range() {
  local name=$1 byte_range=$2
  local src="$TEST_DIR/chunk_boundary/cb_${name}.dat"
  local uri="s3://$BUCKET_NAME/cb_${name}.dat"
  # shellcheck disable=SC2086
  if ! $BINARY cmp $SSE_DOWNLOAD_ARGS --range "$byte_range" "$src" "$uri" 2>/dev/null; then
    echo "FAIL:getObjectRange [cb_${name}] $byte_range — FAILED"
    echo "RERUN:\$BINARY cmp $SSE_DOWNLOAD_ARGS --range \"$byte_range\" $src $uri"
    return
  fi
  echo "PASS:getObjectRange [cb_${name}] $byte_range"
}
# chunk1_exact (size=C): object fills exactly one chunk; test boundary edge bytes
# Every _cmp_range call below runs as a background job; results are gathered
# at the end by wait + collect_results.
info " [chunk1_exact size=${C}]"
_JOB=0
( _cmp_range chunk1_exact "bytes=0-0" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _cmp_range chunk1_exact "bytes=$((C-1))-$((C-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _cmp_range chunk1_exact "bytes=$((C/2))-$((C-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
# chunk1_plus1 (size=C+1): 1 byte spills into 2nd chunk — straddle chunk1 boundary
info " [chunk1_plus1 size=$((C+1))] — chunk1 boundary straddle"
( _cmp_range chunk1_plus1 "bytes=$((C-4))-$((C-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # last 4 bytes of chunk1
( _cmp_range chunk1_plus1 "bytes=$((C))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # only byte in chunk2
( _cmp_range chunk1_plus1 "bytes=$((C-1))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 1 byte each side of boundary
( _cmp_range chunk1_plus1 "bytes=$((C-4))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 5 bytes crossing boundary
# chunk2_minus1 (size=2C-1): ends 1 byte before the 2nd chunk boundary
info " [chunk2_minus1 size=$((C2-1))]"
( _cmp_range chunk2_minus1 "bytes=$((C-4))-$((C+3))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # cross chunk1→2 boundary
( _cmp_range chunk2_minus1 "bytes=$((C2-5))-$((C2-2))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # last 4 bytes of object
# chunk2_exact (size=2C): two full chunks
info " [chunk2_exact size=${C2}]"
( _cmp_range chunk2_exact "bytes=$((C-4))-$((C+3))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # cross chunk1→2 boundary
( _cmp_range chunk2_exact "bytes=$((C-1))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # single-byte straddle
( _cmp_range chunk2_exact "bytes=$((C2-4))-$((C2-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # last 4 bytes
( _cmp_range chunk2_exact "bytes=0-$((C2-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # full object
# chunk2_plus1 (size=2C+1): 1 byte spills into 3rd chunk — straddle chunk2 boundary
info " [chunk2_plus1 size=$((C2+1))] — chunk2 boundary straddle"
( _cmp_range chunk2_plus1 "bytes=$((C-4))-$((C+3))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # cross chunk1→2
( _cmp_range chunk2_plus1 "bytes=$((C2-1))-$((C2))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 1 byte each side of chunk2
( _cmp_range chunk2_plus1 "bytes=$((C2-4))-$((C2))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 5 bytes crossing chunk2→3
( _cmp_range chunk2_plus1 "bytes=$((C2))-$((C2))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # only byte in chunk3
# chunk3_exact (size=3C): three full chunks — every boundary exercised
info " [chunk3_exact size=${C3}] — all boundaries"
( _cmp_range chunk3_exact "bytes=$((C-4))-$((C+3))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk1→2
( _cmp_range chunk3_exact "bytes=$((C-1))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk1→2 single-byte straddle
( _cmp_range chunk3_exact "bytes=$((C2-4))-$((C2+3))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk2→3
( _cmp_range chunk3_exact "bytes=$((C2-1))-$((C2))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk2→3 single-byte straddle
( _cmp_range chunk3_exact "bytes=$((C3-4))-$((C3-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # last 4 bytes of object
( _cmp_range chunk3_exact "bytes=$((C/2))-$((C2+C/2-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # large range spanning all boundaries
wait
collect_results
# Step 6: uploadPart — multipart with part sizes that cross storage chunk boundaries
step_time
echo ""
info "Step 6: uploadPart — multipart upload with chunk-misaligned part sizes..."
info " Part sizes 5M / 6M / 7M: none is a multiple of the ${CHUNK_SIZE}-byte storage chunk"
mkdir -p "$TEST_DIR/multipart_chunk"
MPC_SIZES=("5m" "6m" "7m") # 3 parts each; intentionally misaligned with 4M chunks
# Phase 1: create files and upload all objects in parallel
_JOB=0
for part_size in "${MPC_SIZES[@]}"; do
(
part_bytes=$((${part_size%m} * 1048576))
total=$((part_bytes * 3))
combined="$TEST_DIR/multipart_chunk/mpc_${part_size}x3.dat"
key="mpc_${part_size}x3.dat"
echo "INFO:Creating ${part_size}×3 file (${total} bytes)..."
truncate -s "$total" "$combined"
echo "INFO:uploadPart $key (${total} bytes)..."
if ! $BINARY cp $SSE_UPLOAD_ARGS "$combined" "s3://$BUCKET_NAME/$key" >/dev/null 2>&1; then
echo "FAIL:uploadPart failed for $key"
echo "RERUN:\$BINARY cp $SSE_UPLOAD_ARGS $combined s3://\$BUCKET_NAME/$key"
exit 0
fi
echo "PASS:uploadPart $key (${total} bytes)"
if ! _hsc_cmp "$combined" "s3://$BUCKET_NAME/$key"; then
echo "FAIL:uploadPart full-object integrity failed for $key"
echo "RERUN:\$BINARY cp $SSE_UPLOAD_ARGS $combined s3://\$BUCKET_NAME/$key && \$BINARY cmp $SSE_DOWNLOAD_ARGS $combined s3://\$BUCKET_NAME/$key"
exit 0
fi
echo "PASS:uploadPart full-object integrity verified for $key"
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
wait
collect_results
# Phase 2: getObjectRange at every chunk boundary — all objects × all boundaries in parallel
# num_chunks is a ceiling division (total/C rounded up); one ±4-byte range
# check per interior chunk boundary.
_JOB=0
for part_size in "${MPC_SIZES[@]}"; do
part_bytes=$((${part_size%m} * 1048576))
total=$((part_bytes * 3))
combined="$TEST_DIR/multipart_chunk/mpc_${part_size}x3.dat"
key="mpc_${part_size}x3.dat"
num_chunks=$(( (total + C - 1) / C ))
for (( ci=1; ci < num_chunks; ci++ )); do
boundary=$((ci * C))
range="bytes=$((boundary - 4))-$((boundary + 3))"
(
# SSE-C mode is skipped: 'hsc cmp --range' does not accept SSE-C keys yet
# (SSE_DOWNLOAD_ARGS is only non-empty in sse-c mode).
if [[ -n "$SSE_DOWNLOAD_ARGS" ]]; then
echo "INFO:Range check skipped (SSE-C): $key chunk${ci}→$((ci+1)) boundary $range"
elif $BINARY cmp --range "$range" "$combined" "s3://$BUCKET_NAME/$key" 2>/dev/null; then
echo "PASS:getObjectRange $key chunk${ci}→$((ci+1)) boundary $range"
else
echo "FAIL:getObjectRange $key chunk${ci}→$((ci+1)) boundary FAILED $range"
echo "RERUN:\$BINARY cmp --range \"$range\" $combined s3://\$BUCKET_NAME/$key"
fi
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
done
wait
collect_results
# Step 7: copyObject — server-side copy at sub-chunk, exact-chunk, and multi-chunk sizes
step_time
echo ""
info "Step 7: copyObject — server-side copy at chunk boundaries..."
mkdir -p "$TEST_DIR/copy_verify"
# Index-parallel arrays (same index = same test case):
#   COPY_SRCS  — source keys uploaded by earlier steps
#   COPY_DSTS  — destination keys for the server-side copy
#   COPY_ORIGS — local originals used for byte-level verification
COPY_SRCS=("cb_chunk1_minus1.dat" "cb_chunk1_exact.dat" "cb_chunk1_plus1.dat"
"cb_chunk3_exact.dat" "mpc_5mx3.dat")
COPY_DSTS=("cp_sub_chunk.dat" "cp_exact_chunk.dat" "cp_cross_chunk.dat"
"cp_three_chunks.dat" "cp_multipart_5m.dat")
COPY_ORIGS=("$TEST_DIR/chunk_boundary/cb_chunk1_minus1.dat"
"$TEST_DIR/chunk_boundary/cb_chunk1_exact.dat"
"$TEST_DIR/chunk_boundary/cb_chunk1_plus1.dat"
"$TEST_DIR/chunk_boundary/cb_chunk3_exact.dat"
"$TEST_DIR/multipart_chunk/mpc_5mx3.dat")
_JOB=0
# One background job per copy case; output protocol lines land in
# $RESULTS_DIR/job_N and are merged after the wait barrier.
for i in "${!COPY_SRCS[@]}"; do
(
src=${COPY_SRCS[$i]}; dst=${COPY_DSTS[$i]}; orig=${COPY_ORIGS[$i]}
echo "INFO:copyObject $src → $dst..."
if ! $BINARY cp $SSE_COPY_ARGS "s3://$BUCKET_NAME/$src" "s3://$BUCKET_NAME/$dst" >/dev/null 2>&1; then
echo "FAIL:copyObject failed: $src → $dst"
echo "RERUN:\$BINARY cp $SSE_COPY_ARGS s3://\$BUCKET_NAME/$src s3://\$BUCKET_NAME/$dst"
# Failure is reported via the FAIL: line; exit 0 keeps the job protocol uniform.
exit 0
fi
echo "PASS:copyObject $src → $dst"
# Download the copy and byte-verify against the local original
dl="$TEST_DIR/copy_verify/$dst"
if $BINARY cp $SSE_DOWNLOAD_ARGS "s3://$BUCKET_NAME/$dst" "$dl" >/dev/null 2>&1 \
&& $BINARY cmp "$orig" "$dl" 2>/dev/null; then
echo "PASS:copyObject integrity verified: $dst matches $src"
else
echo "FAIL:copyObject integrity failed: $dst does not match $src"
echo "RERUN:\$BINARY cp $SSE_COPY_ARGS s3://\$BUCKET_NAME/$src s3://\$BUCKET_NAME/$dst && \$BINARY cmp $SSE_DOWNLOAD_ARGS $orig s3://\$BUCKET_NAME/$dst"
fi
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
wait
collect_results
# Step 8: EC stripe boundary tests
#
# Each CHUNK_SIZE-byte chunk is stored under one of three storage policies:
# 3-replica : full chunk replicated 3× (no sub-chunk striping)
# EC 2+1 : chunk split into 2 data stripes → stripe = CHUNK_SIZE / 2
# EC 4+2 : chunk split into 4 data stripes → stripe = CHUNK_SIZE / 4
#
# Unique intra-chunk stripe boundaries (for default 4 MB chunk):
# EC 4+2 only : C/4 and 3C/4 (1 MB and 3 MB)
# EC 2+1 only : C/2 (2 MB)
# chunk : C (shared by all policies)
#
# Tests probe every combination of the form k·stripe ± {0,1} for both stripe
# sizes, across the first two storage chunks, plus a large 2-chunk+stripe object.
step_time
echo ""
# Derived stripe sizes in bytes (C == CHUNK_SIZE, assigned earlier in the file).
S21=$((CHUNK_SIZE / 2)) # EC 2+1 data stripe size
S42=$((CHUNK_SIZE / 4)) # EC 4+2 data stripe size
info "Step 8: EC stripe boundary tests (CHUNK=${CHUNK_SIZE}B S42=${S42}B S21=${S21}B)"
# Work dirs: originals, downloads, copy targets, multipart sources.
mkdir -p "$TEST_DIR/ec" "$TEST_DIR/ec_dl" "$TEST_DIR/ec_copy" "$TEST_DIR/ec_mp"
# ── 8a: putObject / getObject at every EC stripe-derived size ────────────────
echo ""
info "Step 8a: putObject / getObject at EC stripe-boundary sizes..."
# Labels and byte sizes.
# Sizes already covered by Step 5 (C±1, 2C±1, 3C) are excluded.
# Covered boundaries per group:
# s42* → EC4+2 stripe-1 (C/4)
# s42x3* → EC4+2 stripe-3 (3C/4) ← last stripe before chunk boundary
# s21* → EC2+1 stripe (C/2) ← also EC4+2 stripe-2
# c_s42* → EC4+2 stripe-1 inside chunk-2
# c_s21* → EC2+1 stripe inside chunk-2
# c_s42x3*→ EC4+2 stripe-3 inside chunk-2
# c2_* → large 2-chunk objects spanning every boundary above
EC_LABELS=(
"s42_m1" "s42" "s42_p1" # EC4+2 stripe-1 boundary
"s42x3_m1" "s42x3" "s42x3_p1" # EC4+2 stripe-3 boundary
"s21_m1" "s21" "s21_p1" # EC2+1 stripe boundary
"c_s42_m1" "c_s42" "c_s42_p1" # EC4+2 stripe-1 inside chunk-2
"c_s21_m1" "c_s21" "c_s21_p1" # EC2+1 stripe inside chunk-2
"c_s42x3_m1" "c_s42x3" "c_s42x3_p1" # EC4+2 stripe-3 inside chunk-2
"c2_s42" "c2_s21" # 2-chunk + stripe (large objects)
)
# EC_BYTES is index-parallel with EC_LABELS: _m1/_p1 = boundary size ∓/± 1 byte.
EC_BYTES=(
$((S42-1)) $S42 $((S42+1))
$((3*S42-1)) $((3*S42)) $((3*S42+1))
$((S21-1)) $S21 $((S21+1))
$((C+S42-1)) $((C+S42)) $((C+S42+1))
$((C+S21-1)) $((C+S21)) $((C+S21+1))
$((C+3*S42-1)) $((C+3*S42)) $((C+3*S42+1))
$((2*C+S42)) $((2*C+S21))
)
# Create all EC test files in parallel (sparse zero-filled)
for i in "${!EC_LABELS[@]}"; do
truncate -s "${EC_BYTES[$i]}" "$TEST_DIR/ec/ec_${EC_LABELS[$i]}.dat" &
done
wait
info "EC stripe test files created (${#EC_LABELS[@]} files)"
# Upload all via sync
# shellcheck disable=SC2086
if $BINARY sync $SSE_UPLOAD_ARGS "$TEST_DIR/ec/" "s3://$BUCKET_NAME/" 2>/dev/null; then
success "putObject: uploaded ${#EC_LABELS[@]} EC stripe files"
else
error "putObject: sync failed for EC stripe files"
fi
# Download + byte-verify all in parallel
echo ""
info "Step 8a getObject: downloading and verifying EC stripe objects..."
_JOB=0
for i in "${!EC_LABELS[@]}"; do
# Assigned in the parent shell; each subshell inherits its own snapshot.
label=${EC_LABELS[$i]}; size=${EC_BYTES[$i]}
(
orig="$TEST_DIR/ec/ec_${label}.dat"
dl="$TEST_DIR/ec_dl/ec_${label}.dat"
echo "INFO:getObject ec_${label}..."
if ! $BINARY cp $SSE_DOWNLOAD_ARGS "s3://$BUCKET_NAME/ec_${label}.dat" "$dl" >/dev/null 2>&1; then
echo "FAIL:getObject failed ec_${label}"
echo "RERUN:\$BINARY cp $SSE_DOWNLOAD_ARGS s3://\$BUCKET_NAME/ec_${label}.dat \$TEST_DIR/ec_dl/ec_${label}.dat"
exit 0
fi
# Cheap size check first, then a full byte comparison.
# NOTE(review): stat -c%s is GNU coreutils; BSD/macOS stat would need -f%z.
actual=$(stat -c%s "$dl")
if [ "$actual" -ne "$size" ]; then
echo "FAIL:getObject size mismatch ec_${label}: expected ${size} got ${actual}"
echo "RERUN:\$BINARY cp $SSE_DOWNLOAD_ARGS s3://\$BUCKET_NAME/ec_${label}.dat \$TEST_DIR/ec_dl/ec_${label}.dat && stat -c%s \$TEST_DIR/ec_dl/ec_${label}.dat"
exit 0
fi
if $BINARY cmp "$orig" "$dl" 2>/dev/null; then
echo "PASS:getObject ec_${label} (${size}B content identical)"
else
echo "FAIL:getObject data integrity failed ec_${label}"
echo "RERUN:\$BINARY cmp $SSE_DOWNLOAD_ARGS \$TEST_DIR/ec/ec_${label}.dat s3://\$BUCKET_NAME/ec_${label}.dat"
fi
) > "$RESULTS_DIR/job_${_JOB}" &
((_JOB++))
done
wait
collect_results
# ── 8b: getObjectRange at every EC stripe boundary ────────────────────────────
step_time
echo ""
info "Step 8b: getObjectRange at every EC stripe boundary..."
# _ec_range <label> <range> — compare a range against the already-uploaded object.
_ec_range() {
  # Compare one HTTP byte-range of the uploaded object "ec_<label>.dat"
  # against the same range of the local original, emitting a PASS:/FAIL:
  # protocol line (plus a RERUN: hint on failure).
  local lbl=$1 rng=$2
  local local_copy="$TEST_DIR/ec/ec_${lbl}.dat"
  local remote="s3://$BUCKET_NAME/ec_${lbl}.dat"
  # SSE_DOWNLOAD_ARGS is word-split on purpose into separate flags.
  # shellcheck disable=SC2086
  if ! $BINARY cmp $SSE_DOWNLOAD_ARGS --range "$rng" "$local_copy" "$remote" 2>/dev/null; then
    echo "FAIL:getObjectRange [ec_${lbl}] $rng — FAILED"
    echo "RERUN:\$BINARY cmp $SSE_DOWNLOAD_ARGS --range \"$rng\" $local_copy $remote"
    return 0
  fi
  echo "PASS:getObjectRange [ec_${lbl}] $rng"
}
# EC4+2 stripe-1 boundary (offset = S42) — object size must be > S42
info " EC4+2 stripe-1 @ offset ${S42} (policy boundary: C/4)"
_JOB=0
# Each probe runs as a background job writing to its own results file.
# The *_p1 objects are used because they are 1 byte larger than the
# boundary, so a range ending exactly at the boundary offset stays in-bounds.
( _ec_range "s42_p1" "bytes=$((S42-1))-$((S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 2B straddle
( _ec_range "s42_p1" "bytes=$((S42-4))-$((S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 4B before + boundary
# EC4+2 stripe-3 boundary (offset = 3*S42) — last stripe edge before chunk end
info " EC4+2 stripe-3 @ offset $((3*S42)) (policy boundary: 3C/4)"
( _ec_range "s42x3_p1" "bytes=$((3*S42-1))-$((3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 2B straddle
( _ec_range "s42x3_p1" "bytes=$((3*S42-4))-$((3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 4B before
# EC2+1 stripe boundary (offset = S21 = 2*S42)
info " EC2+1 stripe @ offset ${S21} (policy boundary: C/2)"
( _ec_range "s21_p1" "bytes=$((S21-1))-$((S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 2B straddle
( _ec_range "s21_p1" "bytes=$((S21-4))-$((S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "s21_p1" "bytes=0-$((S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # first stripe + 1B
# Inside chunk-2: EC4+2 stripe-1 (offset = C+S42)
info " EC4+2 stripe-1 in chunk-2 @ offset $((C+S42))"
( _ec_range "c_s42_p1" "bytes=$((C+S42-1))-$((C+S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 2B straddle
( _ec_range "c_s42_p1" "bytes=$((C+S42-4))-$((C+S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "c_s42_p1" "bytes=$((C-1))-$((C+S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk boundary → stripe
# Inside chunk-2: EC2+1 stripe (offset = C+S21)
info " EC2+1 stripe in chunk-2 @ offset $((C+S21))"
( _ec_range "c_s21_p1" "bytes=$((C+S21-1))-$((C+S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 2B straddle
( _ec_range "c_s21_p1" "bytes=$((C+S21-4))-$((C+S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "c_s21_p1" "bytes=$((C-4))-$((C+S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # wide: chunk boundary → EC2+1 stripe
# Inside chunk-2: EC4+2 stripe-3 (offset = C+3*S42)
info " EC4+2 stripe-3 in chunk-2 @ offset $((C+3*S42))"
( _ec_range "c_s42x3_p1" "bytes=$((C+3*S42-1))-$((C+3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # 2B straddle
( _ec_range "c_s42x3_p1" "bytes=$((C+3*S42-4))-$((C+3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "c_s42x3_p1" "bytes=$((C+S21-4))-$((C+3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC2+1→EC4+2 within chunk-2
# Large 2-chunk objects: traverse every stripe and chunk boundary in one object.
# Sizes come from EC_BYTES above: c2_s42 = 2C+S42 bytes, c2_s21 = 2C+S21 bytes.
# BUGFIX: the two info lines below previously interpolated ${#} — the number of
# positional parameters of the script (effectively "0") — instead of the
# object's size in bytes; they now print the actual size.
info " Large object c2_s42 ($((2*C+S42))B): all boundaries"
( _ec_range "c2_s42" "bytes=$((S42-1))-$((S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC4+2 in chunk-1
( _ec_range "c2_s42" "bytes=$((3*S42-1))-$((3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC4+2 stripe-3 in chunk-1
( _ec_range "c2_s42" "bytes=$((S21-1))-$((S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC2+1 in chunk-1
( _ec_range "c2_s42" "bytes=$((C-1))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk-1 → chunk-2
( _ec_range "c2_s42" "bytes=$((C+S42-1))-$((C+S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC4+2 in chunk-2
( _ec_range "c2_s42" "bytes=$((C+S21-1))-$((C+S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC2+1 in chunk-2
( _ec_range "c2_s42" "bytes=$((C+3*S42-1))-$((C+3*S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # EC4+2 stripe-3 in chunk-2
( _ec_range "c2_s42" "bytes=$((2*C-1))-$((2*C+S42-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk-2 → chunk-3 + full stripe
info " Large object c2_s21 ($((2*C+S21))B): all boundaries"
( _ec_range "c2_s21" "bytes=$((S42-1))-$((S42))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "c2_s21" "bytes=$((S21-1))-$((S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "c2_s21" "bytes=$((C-1))-$((C))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk boundary
( _ec_range "c2_s21" "bytes=$((C+S21-1))-$((C+S21))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++))
( _ec_range "c2_s21" "bytes=$((2*C-1))-$((2*C+S21-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # chunk-2→3 + entire EC2+1 stripe
( _ec_range "c2_s21" "bytes=0-$((2*C+S21-1))" ) > "$RESULTS_DIR/job_${_JOB}" & ((_JOB++)) # full object
# Barrier: all probe jobs must finish before the report is updated.
wait
collect_results
# ── 8c: uploadPart — part sizes misaligned to EC stripes ─────────────────────
step_time
echo ""
info "Step 8c: uploadPart — part sizes misaligned to EC stripes..."
#
# Part sizes chosen so that every uploadPart boundary (where one part ends and
# the next begins) lands at an offset that is NOT a multiple of either stripe
# size, forcing the server to stitch EC stripes across part boundaries:
#
# S42+1 : 1B over one EC4+2 stripe (part ends mid-stripe-2)
# S21+1 : 1B over one EC2+1 stripe (part ends mid-stripe-2)
# 3*S42+1 : covers 3 EC4+2 stripes+1B (part ends 1B into next EC4+2 stripe)
# C+S42/2 : straddles a chunk boundary AND lands mid-stripe inside chunk-2
#
# Each object is 4 parts; after upload getObjectRange is tested at every
# EC stripe and chunk boundary that falls within the object.
# Index-parallel arrays: part size in bytes ↔ label used in key/filename.
EC_MP_PART_SIZES=($((S42+1)) $((S21+1)) $((3*S42+1)) $((C+S42/2)))
EC_MP_PART_LABELS=("s42p1x4" "s21p1x4" "3s42p1x4" "c_halfS42x4")
_JOB=0
# Phase 1: create files and upload all objects in parallel
for i in "${!EC_MP_PART_LABELS[@]}"; do
(
part_bytes=${EC_MP_PART_SIZES[$i]}
label=${EC_MP_PART_LABELS[$i]}
total=$((part_bytes * 4))
combined="$TEST_DIR/ec_mp/ec_mp_${label}.dat"
key="ec_mp_${label}.dat"
echo "INFO:uploadPart ec_mp_${label}: 4×${part_bytes}B = ${total}B..."
truncate -s "$total" "$combined"
if ! $BINARY cp $SSE_UPLOAD_ARGS "$combined" "s3://$BUCKET_NAME/$key" >/dev/null 2>&1; then
echo "FAIL:uploadPart failed ec_mp_${label}"
echo "RERUN:\$BINARY cp $SSE_UPLOAD_ARGS $combined s3://\$BUCKET_NAME/$key"
exit 0
fi
echo "PASS:uploadPart ec_mp_${label} (4×${part_bytes}B = ${total}B)"