-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathshared_memory_op_warp_compress.cc
More file actions
executable file
·2451 lines (2034 loc) · 142 KB
/
shared_memory_op_warp_compress.cc
File metadata and controls
executable file
·2451 lines (2034 loc) · 142 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#include "shared_memory_op_warp_compress.hpp"
// Convert an old-style shared-memory template (with per-warp metadata) into a
// warp-compressed template. Precondition: every warp inside a thread-block must
// use the same thread-level block size (asserted below). The val/col arrays are
// re-laid-out so that, within each block, the non-zeros of its threads are
// interleaved (element k of every thread stored contiguously).
// Ownership notes: the warp-level metadata arrays of old_template are freed and
// their pointers nulled; most other metadata pointers are shared (aliased, not
// copied) into the returned template.
// NOTE: this is the deprecated entry point — it prints a warning but still runs.
shared_memory_template_warp_compress_t *init_shared_memory_template_warp_compress(shared_memory_template_t *old_template)
{
    cout << "init_shared_memory_template_warp_compress: old API, not supported any more" << endl;
    assert(old_template != NULL);
    // Thread-level block size of each thread-block.
    vector<unsigned long> new_thread_block_size_in_block_vec;
    // First thread index (offset) of each thread-block.
    vector<unsigned long> new_block_begin_thread_index_offset_vec;
    // Walk every block and verify the thread-level block size is identical for
    // all warps inside the block (required for warp compression).
    assert(old_template->block_begin_warp_index_offset != NULL);
    for (unsigned long block_index = 0; block_index < (old_template->size_of_block_begin_warp_index_offset - 1); block_index++)
    {
        // First warp of this block and of the next block (CSR-style offsets).
        unsigned long warp_begin_index_in_block = read_from_array_with_data_type(old_template->block_begin_warp_index_offset, old_template->data_type_of_block_begin_warp_index_offset, block_index);
        unsigned long warp_begin_index_in_next_block = read_from_array_with_data_type(old_template->block_begin_warp_index_offset, old_template->data_type_of_block_begin_warp_index_offset, block_index + 1);
        // Thread-level block size of the first warp in this block.
        assert(warp_begin_index_in_block < old_template->size_of_thread_block_size_in_warp);
        unsigned long thread_level_size_in_first_warp = read_from_array_with_data_type(old_template->thread_block_size_in_warp, old_template->data_type_of_thread_block_size_in_warp, warp_begin_index_in_block);
        // Index of the first thread of the first warp in this block.
        unsigned long thread_begin_offset_in_first_warp = read_from_array_with_data_type(old_template->warp_begin_thread_index_offset, old_template->data_type_of_warp_begin_thread_index_offset, warp_begin_index_in_block);
        // Check every warp-level block: its thread-level block size must equal
        // the first warp's, otherwise this block cannot be compressed.
        for (unsigned long warp_index = warp_begin_index_in_block; warp_index < warp_begin_index_in_next_block; warp_index++)
        {
            // Thread-level block size of the current warp.
            assert(warp_index < old_template->size_of_thread_block_size_in_warp);
            unsigned long thread_level_size_in_this_warp = read_from_array_with_data_type(old_template->thread_block_size_in_warp, old_template->data_type_of_thread_block_size_in_warp, warp_index);
            if (thread_level_size_in_this_warp != thread_level_size_in_first_warp)
            {
                cout << "can not compress in block " << block_index << " because thread level block size is not the same" << endl;
                assert(false);
            }
        }
        // Validation passed: record the per-block thread size and thread offset.
        new_thread_block_size_in_block_vec.push_back(thread_level_size_in_first_warp);
        new_block_begin_thread_index_offset_vec.push_back(thread_begin_offset_in_first_warp);
    }
    assert(new_thread_block_size_in_block_vec.size() == (old_template->size_of_block_begin_warp_index_offset - 1));
    // Thread offsets are CSR-style: append the total thread count as the closing entry.
    new_block_begin_thread_index_offset_vec.push_back(read_from_array_with_data_type(old_template->warp_begin_thread_index_offset, old_template->data_type_of_warp_begin_thread_index_offset, old_template->size_of_warp_begin_thread_index_offset - 1));
    // Value array (after padding only) and its data type.
    assert(old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->padding_val_arr != NULL);
    void *val_arr_after_padding = old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->padding_val_arr;
    data_type data_type_of_val_arr_after_padding = old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->val_data_type;
    unsigned long size_of_val_arr_after_padding = old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->padding_arr_size;
    // Column index array (after padding only) and its data type.
    void *col_index_arr_after_padding = old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->read_index[5]->index_arr;
    assert(col_index_arr_after_padding != NULL);
    data_type data_type_of_col_index_arr_after_padding = old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->read_index[5]->index_data_type;
    unsigned long size_of_col_index_arr_after_padding = old_template->matrix->block_coor_table.item_arr[old_template->dense_block_index]->compressed_block_ptr->read_index[5]->length;
    assert(size_of_val_arr_after_padding == size_of_col_index_arr_after_padding);
    assert(size_of_col_index_arr_after_padding == old_template->size_of_val_arr);
    assert(old_template->size_of_col_index_arr == old_template->size_of_val_arr);
    // New col and val arrays (the interleaved destination buffers).
    void *new_col_index_arr = malloc_arr(size_of_col_index_arr_after_padding, data_type_of_col_index_arr_after_padding);
    void *new_val_arr = malloc_arr(size_of_val_arr_after_padding, data_type_of_val_arr_after_padding);
    // Interleaved storage: within each block the elements of its threads are
    // interleaved; each block interleaves its own content independently.
    // Iterate over every block.
    for (unsigned long block_index = 0; block_index < (old_template->size_of_block_begin_warp_index_offset - 1); block_index++)
    {
        // Index of the first non-zero of the current block.
        unsigned long block_begin_nz_index = read_from_array_with_data_type(old_template->block_nz_begin_offset, old_template->data_type_of_block_nz_begin_offset, block_index);
        // Start position of this block's thread-level blocks.
        unsigned long block_begin_thread_index = new_block_begin_thread_index_offset_vec[block_index];
        // Start position of the next block's thread-level blocks.
        unsigned long next_block_begin_thread_index = new_block_begin_thread_index_offset_vec[block_index + 1];
        assert(next_block_begin_thread_index >= block_begin_thread_index);
        // Number of thread-level blocks in the current block.
        unsigned long block_num_of_thread_level_block = next_block_begin_thread_index - block_begin_thread_index;
        // Size of the thread-level blocks in the current block.
        unsigned long block_size_of_thread_level_block = new_thread_block_size_in_block_vec[block_index];
        if (block_index < (old_template->size_of_block_begin_warp_index_offset - 2))
        {
            // Sanity check: nz count of this block matches the next block's start offset.
            assert((block_begin_nz_index + block_num_of_thread_level_block * block_size_of_thread_level_block) == read_from_array_with_data_type(old_template->block_nz_begin_offset, old_template->data_type_of_block_nz_begin_offset, block_index + 1));
        }
        assert(next_block_begin_thread_index > block_begin_thread_index);
        // Iterate over all thread-level blocks of the current thread-block-level block.
        for (unsigned long thread_index = block_begin_thread_index; thread_index < next_block_begin_thread_index; thread_index++)
        {
            // Index of the thread within the block.
            unsigned long thread_inner_block = thread_index - block_begin_thread_index;
            // Iterate over all non-zeros of the thread-level block.
            for (unsigned long nz_index = 0; nz_index < block_size_of_thread_level_block; nz_index++)
            {
                // Position of this non-zero in the source arrays.
                unsigned long source_index_of_this_nz = block_begin_nz_index + thread_inner_block * block_size_of_thread_level_block + nz_index;
                assert(source_index_of_this_nz < size_of_val_arr_after_padding);
                // Read val and col from the source arrays.
                double val = read_double_from_array_with_data_type(val_arr_after_padding, data_type_of_val_arr_after_padding, source_index_of_this_nz);
                unsigned long col_index = read_from_array_with_data_type(col_index_arr_after_padding, data_type_of_col_index_arr_after_padding, source_index_of_this_nz);
                // Position in the destination arrays: interleaved with stride
                // equal to the number of thread-level blocks in this block.
                unsigned long dest_index_of_this_nz = block_begin_nz_index + thread_inner_block + nz_index * block_num_of_thread_level_block;
                if (dest_index_of_this_nz >= size_of_val_arr_after_padding)
                {
                    cout << "dest_index_of_this_nz:" << dest_index_of_this_nz << endl;
                    cout << "block_begin_nz_index:" << block_begin_nz_index << endl;
                    cout << "thread_inner_block:" << thread_inner_block << endl;
                    cout << "block_num_of_thread_level_block:" << block_num_of_thread_level_block << endl;
                    cout << "nz_index:" << nz_index << endl;
                    cout << "block_index:" << block_index << endl;
                    cout << "size_of_val_arr_after_padding:" << size_of_val_arr_after_padding << endl;
                }
                assert(dest_index_of_this_nz < size_of_val_arr_after_padding);
                // Write the interleaved content.
                write_to_array_with_data_type(new_col_index_arr, data_type_of_col_index_arr_after_padding, dest_index_of_this_nz, col_index);
                write_double_to_array_with_data_type(new_val_arr, data_type_of_val_arr_after_padding, dest_index_of_this_nz, val);
            }
        }
    }
    // Destroy the warp-level metadata (no longer needed after compression).
    delete_arr_with_data_type(old_template->warp_begin_thread_index_offset, old_template->data_type_of_warp_begin_thread_index_offset);
    delete_arr_with_data_type(old_template->warp_nz_begin_offset, old_template->data_type_of_warp_nz_begin_offset);
    delete_arr_with_data_type(old_template->thread_block_size_in_warp, old_template->data_type_of_thread_block_size_in_warp);
    // First warp index of each block.
    delete_arr_with_data_type(old_template->block_begin_warp_index_offset, old_template->data_type_of_block_begin_warp_index_offset);
    old_template->warp_begin_thread_index_offset = NULL;
    old_template->warp_nz_begin_offset = NULL;
    old_template->thread_block_size_in_warp = NULL;
    old_template->block_begin_warp_index_offset = NULL;
    shared_memory_template_warp_compress_t *new_template = new shared_memory_template_warp_compress_t();
    new_template->dense_block_index = old_template->dense_block_index;
    new_template->matrix = old_template->matrix;
    new_template->kernal_first_row_index = old_template->kernal_first_row_index;
    new_template->kernal_first_col_index = old_template->kernal_first_col_index;
    // Inherit the previous atomic-add strategy.
    new_template->is_atom_add = old_template->is_atom_add;
    // Offset of the first thread of each block (CSR-style, sized by the last entry).
    new_template->data_type_of_block_begin_thread_index_offset = find_most_suitable_data_type(new_block_begin_thread_index_offset_vec[new_block_begin_thread_index_offset_vec.size() - 1]);
    assert(new_block_begin_thread_index_offset_vec.size() == old_template->size_of_block_begin_warp_index_offset);
    new_template->size_of_block_begin_thread_index_offset = new_block_begin_thread_index_offset_vec.size();
    new_template->block_begin_thread_index_offset = malloc_arr(new_block_begin_thread_index_offset_vec.size(), new_template->data_type_of_block_begin_thread_index_offset);
    copy_unsigned_long_arr_to_others(&(new_block_begin_thread_index_offset_vec[0]), new_template->block_begin_thread_index_offset, new_template->data_type_of_block_begin_thread_index_offset, new_template->size_of_block_begin_thread_index_offset);
    // Offset of the first non-zero of each block (pointer aliased from old template).
    new_template->data_type_of_block_nz_begin_offset = old_template->data_type_of_block_nz_begin_offset;
    new_template->size_of_block_nz_begin_offset = old_template->size_of_block_nz_begin_offset;
    // FIX: was `&&` (logical and), which holds for any non-zero sizes and so
    // checked nothing. The intent is an equality check: there is one nz-begin
    // offset per block, i.e. one fewer than the CSR thread-offset entries
    // (mirrors the count assert on thread_block_size_in_block below).
    assert(new_template->size_of_block_nz_begin_offset == new_template->size_of_block_begin_thread_index_offset - 1);
    new_template->block_nz_begin_offset = old_template->block_nz_begin_offset;
    // Thread-level block size of each block.
    new_template->data_type_of_thread_block_size_in_block = find_most_suitable_data_type(new_thread_block_size_in_block_vec[new_thread_block_size_in_block_vec.size() - 1]);
    assert(new_thread_block_size_in_block_vec.size() == old_template->size_of_block_nz_begin_offset);
    new_template->size_of_thread_block_size_in_block = new_thread_block_size_in_block_vec.size();
    new_template->thread_block_size_in_block = malloc_arr(new_thread_block_size_in_block_vec.size(), new_template->data_type_of_thread_block_size_in_block);
    // Copy into the typed array.
    copy_unsigned_long_arr_to_others(&(new_thread_block_size_in_block_vec[0]), new_template->thread_block_size_in_block, new_template->data_type_of_thread_block_size_in_block, new_template->size_of_thread_block_size_in_block);
    // Compression state is not copied: compress the template first, then the data.
    // Copy sorting metadata (pointers aliased, not duplicated).
    new_template->global_sort_index = old_template->global_sort_index;
    new_template->local_sort_index = old_template->local_sort_index;
    new_template->row_index_before_sort = old_template->row_index_before_sort;
    new_template->data_type_of_row_index_before_sort = old_template->data_type_of_row_index_before_sort;
    new_template->size_of_row_index_before_sort = old_template->size_of_row_index_before_sort;
    new_template->block_first_row_index = old_template->block_first_row_index;
    new_template->size_of_block_first_row_index = old_template->size_of_block_first_row_index;
    new_template->data_type_of_block_first_row_index = old_template->data_type_of_block_first_row_index;
    new_template->row_offset_in_thread_tmp_result = old_template->row_offset_in_thread_tmp_result;
    new_template->size_of_row_offset_in_thread_tmp_result = old_template->size_of_row_offset_in_thread_tmp_result;
    new_template->data_type_of_row_offset_in_thread_tmp_result = old_template->data_type_of_row_offset_in_thread_tmp_result;
    // Value array (the newly interleaved buffer).
    new_template->data_type_of_val_arr = old_template->data_type_of_val_arr;
    new_template->size_of_val_arr = old_template->size_of_val_arr;
    new_template->val_arr = new_val_arr;
    // Column index array (the newly interleaved buffer).
    new_template->data_type_of_col_index_arr = old_template->data_type_of_col_index_arr;
    new_template->size_of_col_index_arr = old_template->size_of_col_index_arr;
    new_template->col_index_arr = new_col_index_arr;
    return new_template;
}
shared_memory_template_warp_compress_t *init_shared_memory_template_warp_compress(code_builder_t *builder, unsigned long dense_block_id)
{
assert(builder != NULL);
assert(builder->op_manager != NULL);
assert(builder->op_manager->matrix != NULL);
sparse_struct_t *matrix = builder->op_manager->matrix;
assert(matrix->block_coor_table.item_arr.size() > dense_block_id);
// 创建对应模板
shared_memory_template_warp_compress_t *new_template = new shared_memory_template_warp_compress_t();
new_template->dense_block_index = dense_block_id;
new_template->matrix = matrix;
new_template->kernal_first_row_index = matrix->block_coor_table.item_arr[dense_block_id]->min_dense_row_index;
new_template->kernal_first_col_index = matrix->block_coor_table.item_arr[dense_block_id]->min_dense_col_index;
compressed_block_t *compressed_block_view = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr;
if (matrix->block_coor_table.item_arr[dense_block_id]->min_dense_col_index == 0 && matrix->block_coor_table.item_arr[dense_block_id]->max_dense_col_index == matrix->dense_col_number - 1)
{
// 稠密子块之间没有共享的行
}
else
{
new_template->is_atom_add = true;
}
// 首先处理每一线程的全局行索引,将全局行索引搞出来
// 分别遍历三个层次的索引
index_of_compress_block_t *block_level_index = compressed_block_view->read_index[2];
index_of_compress_block_t *warp_level_index = compressed_block_view->read_index[3];
index_of_compress_block_t *thread_level_index = compressed_block_view->read_index[4];
assert(block_level_index->level_of_this_index == TBLOCK_LEVEL);
assert(warp_level_index->level_of_this_index == WRAP_LEVEL);
assert(thread_level_index->level_of_this_index == THREAD_LEVEL);
assert(compressed_block_view->read_index[0]->max_row_index == block_level_index->max_row_index);
assert(matrix->block_coor_table.item_arr[dense_block_id]->max_dense_row_index <= block_level_index->max_row_index);
if (thread_level_index->row_number_of_block_arr != NULL)
{
cout << "row num in thread level block must be 1, thread level index shouldn't have this metadata" << endl;
assert(false);
}
new_template->effective_row_num = matrix->block_coor_table.item_arr[dense_block_id]->max_dense_row_index - matrix->block_coor_table.item_arr[dense_block_id]->min_dense_row_index + 1;
// 遍历所有线程块粒度的块所包含的行,如果有相交代表要用原子加来规约显存上的结果
// 用两个变量分别存储,当前的线程块粒度的块包含的最小行号和最大行号
unsigned long global_min_row_index = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, 0);
unsigned long global_max_row_index = global_min_row_index + read_from_array_with_data_type(block_level_index->row_number_of_block_arr, block_level_index->data_type_of_row_number_of_block_arr, 0) - 1;
for (unsigned long index_of_block_level_index = 1; index_of_block_level_index < block_level_index->block_num; index_of_block_level_index++)
{
// 当前块的起始行号和包含的行的数量,
unsigned long cur_min_row_index = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, index_of_block_level_index);
if (cur_min_row_index <= global_max_row_index)
{
// 代表有重合的部分
new_template->is_atom_add = true;
// 这里的重合代表一行可能有多个线程块,就不支持这个模板了
cout << "several tblock level block in one row, not supported in this template" << endl;
assert(false);
break;
}
unsigned long cur_row_num = read_from_array_with_data_type(block_level_index->row_number_of_block_arr, block_level_index->data_type_of_row_number_of_block_arr, index_of_block_level_index);
if (cur_row_num + cur_min_row_index - 1 > global_max_row_index)
{
// 没有重合就修改元数据
global_max_row_index = cur_min_row_index + cur_row_num - 1;
}
}
// 当前稠密子块的行数量,这个值也是块最大行号+1
unsigned long total_row_num = block_level_index->max_row_index - block_level_index->min_row_index + 1;
vector<unsigned long> new_row_offset_in_thread_tmp_result_vec;
new_row_offset_in_thread_tmp_result_vec.push_back(0);
// 用一个数组记录每一行中间结果的数量
vector<unsigned long> thread_level_result_num_of_each_row(total_row_num);
// 每个block的线程粒度的块的大小
vector<unsigned long> new_thread_block_size_in_block_vec(block_level_index->block_num);
// 每个block的thread粒度的块的偏移量
// cout << "block_level_index->block_num + 1:" << block_level_index->block_num + 1 << endl;
vector<unsigned long> new_block_begin_thread_index_offset_vec(block_level_index->block_num + 1);
// 全部初始化为0
for (unsigned long i = 0; i < total_row_num; i++)
{
thread_level_result_num_of_each_row[i] = 0;
}
// 分别遍历三个层次
// 遍历三个层次的索引,计算每一行中间结果的数量,并且计算每一个block线程粒度的块的大小,以及每个块的thead偏移量
for (unsigned long index_of_block_level_index = 0; index_of_block_level_index < block_level_index->block_num; index_of_block_level_index++)
{
// cout << "index_of_block_level_index:" << index_of_block_level_index << endl;
// 当前block的首行行号
unsigned long block_first_row_index = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, index_of_block_level_index);
// block中第一个warp号和下一个block的首warp
unsigned long this_block_first_warp_index = read_from_array_with_data_type(block_level_index->index_arr, block_level_index->index_data_type, index_of_block_level_index);
unsigned long next_block_first_warp_index = read_from_array_with_data_type(block_level_index->index_arr, block_level_index->index_data_type, index_of_block_level_index + 1);
// 当前线程块粒度的块的第一个warp粒度的块的线程粒度的块的大小
assert(this_block_first_warp_index < warp_level_index->block_num && thread_level_index->coo_block_size_arr != NULL);
unsigned long this_block_first_warp_thread_block_size = read_from_array_with_data_type(thread_level_index->coo_block_size_arr, thread_level_index->data_type_of_coo_block_size_arr, this_block_first_warp_index);
assert(this_block_first_warp_thread_block_size != 0);
new_thread_block_size_in_block_vec[index_of_block_level_index] = this_block_first_warp_thread_block_size;
// 当前线程块粒度的块第一个线程粒度的块的索引
assert(this_block_first_warp_index < warp_level_index->length);
unsigned long this_block_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, this_block_first_warp_index);
new_block_begin_thread_index_offset_vec[index_of_block_level_index] = this_block_first_thread_index;
// 遍历warp层次
for (unsigned long index_of_warp_level_index = this_block_first_warp_index; index_of_warp_level_index < next_block_first_warp_index; index_of_warp_level_index++)
{
assert(index_of_warp_level_index < warp_level_index->block_num);
unsigned long warp_first_row_index = read_from_array_with_data_type(warp_level_index->index_of_the_first_row_arr, warp_level_index->data_type_of_index_of_the_first_row_arr, index_of_warp_level_index);
unsigned long this_warp_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, index_of_warp_level_index);
unsigned long next_warp_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, index_of_warp_level_index + 1);
// 当前warp的线程粒度的块的大小
unsigned long this_warp_thread_level_block_size = read_from_array_with_data_type(thread_level_index->coo_block_size_arr, thread_level_index->data_type_of_coo_block_size_arr, index_of_warp_level_index);
// 比较线程粒度的块的大小是不是一致的
if (this_warp_thread_level_block_size != new_thread_block_size_in_block_vec[index_of_block_level_index])
{
cout << "this_warp_thread_level_block_size:" << this_warp_thread_level_block_size << endl;
cout << "new_thread_block_size_in_block_vec[index_of_block_level_index]:" << new_thread_block_size_in_block_vec[index_of_block_level_index] << endl;
cout << "can not compress in block " << index_of_block_level_index << " because thread level block size is not the same" << endl;
assert(false);
}
for (unsigned long index_of_thread_level_index = this_warp_first_thread_index; index_of_thread_level_index < next_warp_first_thread_index; index_of_thread_level_index++)
{
// assert(index_of_thread_level_index < thread_level_index->block_num);
if (index_of_thread_level_index >= thread_level_index->block_num)
{
assert(false);
}
unsigned long thread_first_row_index = read_from_array_with_data_type(thread_level_index->index_of_the_first_row_arr, thread_level_index->data_type_of_index_of_the_first_row_arr, index_of_thread_level_index);
// 全局的线程粒度的子块所覆盖的行
unsigned long global_thread_first_row_index = block_first_row_index + warp_first_row_index + thread_first_row_index;
assert(global_thread_first_row_index < total_row_num);
thread_level_result_num_of_each_row[global_thread_first_row_index] = thread_level_result_num_of_each_row[global_thread_first_row_index] + 1;
// 因为在线程块粒度先进行一次归约,线程块内部肯定是一行一个结果,所以所以本质上判断线程块粒度的块的行的分布来判断对于原子加的要求
}
}
}
// 遍历每一行结果的数量,从而得出每一行在中间结果中的偏移量
for (unsigned long row_index = 0; row_index < thread_level_result_num_of_each_row.size(); row_index++)
{
new_row_offset_in_thread_tmp_result_vec.push_back(new_row_offset_in_thread_tmp_result_vec[new_row_offset_in_thread_tmp_result_vec.size() - 1] + thread_level_result_num_of_each_row[row_index]);
}
assert(new_row_offset_in_thread_tmp_result_vec.size() == total_row_num + 1);
// block的线程粒度的首个索引还需要一个结束位置
new_block_begin_thread_index_offset_vec[block_level_index->block_num] = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, warp_level_index->block_num);
// 确定数据类型的大小
new_template->data_type_of_row_offset_in_thread_tmp_result = find_most_suitable_data_type(new_row_offset_in_thread_tmp_result_vec[new_row_offset_in_thread_tmp_result_vec.size() - 1]);
// 数组的长度
new_template->size_of_row_offset_in_thread_tmp_result = new_row_offset_in_thread_tmp_result_vec.size();
// 申请归约数组
new_template->row_offset_in_thread_tmp_result = malloc_arr(new_template->size_of_row_offset_in_thread_tmp_result, new_template->data_type_of_row_offset_in_thread_tmp_result);
// 拷贝数组
copy_unsigned_long_arr_to_others(&(new_row_offset_in_thread_tmp_result_vec[0]), new_template->row_offset_in_thread_tmp_result, new_template->data_type_of_row_offset_in_thread_tmp_result, new_template->size_of_row_offset_in_thread_tmp_result);
// 拷贝块的首行行号,因为实际大小会多一个,需要重新申请一个数组,最后一位是整个block行的数量,插在最后的CSR索引的大小肯定大于之前的所有数据
// 考虑到空行,最多也是等于最后一个数据
assert(total_row_num >= read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, block_level_index->block_num - 1));
// 创建一个新的数组,包含block_num + 1个元素,最后一个是整个稠密子块的总行号
new_template->data_type_of_block_first_row_index = find_most_suitable_data_type(total_row_num);
new_template->size_of_block_first_row_index = block_level_index->length;
new_template->block_first_row_index = malloc_arr(new_template->size_of_block_first_row_index, new_template->data_type_of_block_first_row_index);
// 不同类型数组之间的拷贝,从block_level_index中拷贝到new_template中
for (unsigned long block_id = 0; block_id < block_level_index->block_num; block_id++)
{
unsigned long source_arr_content = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, block_id);
// 写数据
write_to_array_with_data_type(new_template->block_first_row_index, new_template->data_type_of_block_first_row_index, block_id, source_arr_content);
}
// 最后写一个数据,写的的是行的数量
write_to_array_with_data_type(new_template->block_first_row_index, new_template->data_type_of_block_first_row_index, block_level_index->block_num, total_row_num);
new_template->data_type_of_block_begin_thread_index_offset = find_most_suitable_data_type(new_block_begin_thread_index_offset_vec[block_level_index->block_num]);
new_template->size_of_block_begin_thread_index_offset = new_block_begin_thread_index_offset_vec.size();
// 申请数组以及拷贝
new_template->block_begin_thread_index_offset = malloc_arr(new_template->size_of_block_begin_thread_index_offset, new_template->data_type_of_block_begin_thread_index_offset);
copy_unsigned_long_arr_to_others(&(new_block_begin_thread_index_offset_vec[0]), new_template->block_begin_thread_index_offset, new_template->data_type_of_block_begin_thread_index_offset, new_template->size_of_block_begin_thread_index_offset);
// 继承thread元数据中的数据类型
new_template->data_type_of_thread_block_size_in_block = thread_level_index->data_type_of_coo_block_size_arr;
new_template->size_of_thread_block_size_in_block = new_thread_block_size_in_block_vec.size();
new_template->thread_block_size_in_block = malloc_arr(new_template->size_of_thread_block_size_in_block, new_template->data_type_of_thread_block_size_in_block);
copy_unsigned_long_arr_to_others(&(new_thread_block_size_in_block_vec[0]), new_template->thread_block_size_in_block, new_template->data_type_of_thread_block_size_in_block, new_template->size_of_thread_block_size_in_block);
// 排序相关
// 一些排序的数据
// 最后给出排序索引类型和具体的数组
if (compressed_block_view->y_write_index.size() > 0)
{
// 在子块内排序了
assert(compressed_block_view->is_sorted == true && builder->sub_block_sort_type_vec[dense_block_id] == SUB_BLOCK_SORT && matrix->is_sorted == false);
new_template->global_sort_index = false;
new_template->local_sort_index = true;
// 拷贝
new_template->data_type_of_row_index_before_sort = compressed_block_view->y_write_index[0]->index_data_type;
new_template->row_index_before_sort = compressed_block_view->y_write_index[0]->index_arr;
new_template->size_of_row_index_before_sort = compressed_block_view->y_write_index[0]->length;
}
else if (matrix->sorted_row_index != NULL)
{
cout << "have global sort" << endl;
// 在全局范围内有排序
assert(compressed_block_view->is_sorted == false && matrix->is_sorted == true && builder->sub_block_sort_type_vec[dense_block_id] == GLOBAL_SORT);
new_template->global_sort_index = true;
new_template->local_sort_index = false;
// 拷贝
new_template->data_type_of_row_index_before_sort = matrix->data_type_of_sorted_row_index;
new_template->row_index_before_sort = matrix->sorted_row_index;
new_template->size_of_row_index_before_sort = matrix->dense_row_number;
}
// block nz
// block和warp第一个非零元的索引,都按照各自的block size初始化
new_template->data_type_of_block_nz_begin_offset = block_level_index->data_type_of_coo_begin_index_arr;
new_template->block_nz_begin_offset = block_level_index->coo_begin_index_arr;
new_template->size_of_block_nz_begin_offset = block_level_index->block_num;
// col和val的padding
void *val_arr_after_padding = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->padding_val_arr;
data_type data_type_of_val_arr_after_padding = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->val_data_type;
unsigned long size_of_val_arr_after_padding = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->padding_arr_size;
// 仅仅经过padding之后的列数组及其数据类型
void *col_index_arr_after_padding = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->read_index[5]->index_arr;
assert(col_index_arr_after_padding != NULL);
data_type data_type_of_col_index_arr_after_padding = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->read_index[5]->index_data_type;
unsigned long size_of_col_index_arr_after_padding = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->read_index[5]->length;
assert(size_of_val_arr_after_padding == size_of_col_index_arr_after_padding);
// 将每个wrap块内部的数据进行交错存储,每个wrap内的线程粒度的块的大小是一样的,大小和padding之后的大小是一样的,数据类型也一样
new_template->data_type_of_val_arr = data_type_of_val_arr_after_padding;
new_template->size_of_val_arr = size_of_val_arr_after_padding;
// 申请值数组
new_template->val_arr = malloc_arr(new_template->size_of_val_arr, new_template->data_type_of_val_arr);
// 列号也和padding之后是一样
new_template->data_type_of_col_index_arr = data_type_of_col_index_arr_after_padding;
new_template->size_of_col_index_arr = size_of_col_index_arr_after_padding;
new_template->col_index_arr = malloc_arr(new_template->size_of_col_index_arr, new_template->data_type_of_col_index_arr);
// 这里做一个交错存储,每一个thread中的元素交错起来,每个block内分别交错自己的内容
// 遍历每一个block
for (unsigned long block_index = 0; block_index < (block_level_index->block_num - 1); block_index++)
{
// 当前块的头部非零元索引
unsigned long block_begin_nz_index = read_from_array_with_data_type(new_template->block_nz_begin_offset, new_template->data_type_of_block_nz_begin_offset, block_index);
// 当前块的thread块的起始位置
unsigned long block_begin_thread_index = new_block_begin_thread_index_offset_vec[block_index];
// 下一个块thread块的起始位置
unsigned long next_block_begin_thread_index = new_block_begin_thread_index_offset_vec[block_index + 1];
assert(next_block_begin_thread_index >= block_begin_thread_index);
// 当前线程粒度的块的数量
unsigned long block_num_of_thread_level_block = next_block_begin_thread_index - block_begin_thread_index;
// 当前线程粒度的块的大小
unsigned long block_size_of_thread_level_block = new_thread_block_size_in_block_vec[block_index];
if (block_index < (block_level_index->length - 2))
{
assert((block_begin_nz_index + block_num_of_thread_level_block * block_size_of_thread_level_block) == read_from_array_with_data_type(new_template->block_nz_begin_offset, new_template->data_type_of_block_nz_begin_offset, block_index + 1));
}
assert(next_block_begin_thread_index > block_begin_thread_index);
// 遍历当前线程块粒度的块的所有线程粒度的块
for (unsigned long thread_index = block_begin_thread_index; thread_index < next_block_begin_thread_index; thread_index++)
{
// 线程的块内索引
unsigned long thread_inner_block = thread_index - block_begin_thread_index;
// 遍历线程粒度的块的所有非零元
for (unsigned long nz_index = 0; nz_index < block_size_of_thread_level_block; nz_index++)
{
// 当前非零元在源数组中的位置
unsigned long source_index_of_this_nz = block_begin_nz_index + thread_inner_block * block_size_of_thread_level_block + nz_index;
assert(source_index_of_this_nz < size_of_val_arr_after_padding);
// 将val和col从源数组中读出来
double val = read_double_from_array_with_data_type(val_arr_after_padding, data_type_of_val_arr_after_padding, source_index_of_this_nz);
unsigned long col_index = read_from_array_with_data_type(col_index_arr_after_padding, data_type_of_col_index_arr_after_padding, source_index_of_this_nz);
// 当前非零元在目标数组中的位置,按照线程粒度的块的数量交错来存
unsigned long dest_index_of_this_nz = block_begin_nz_index + thread_inner_block + nz_index * block_num_of_thread_level_block;
if (dest_index_of_this_nz >= size_of_val_arr_after_padding)
{
cout << "dest_index_of_this_nz:" << dest_index_of_this_nz << endl;
cout << "block_begin_nz_index:" << block_begin_nz_index << endl;
cout << "thread_inner_block:" << thread_inner_block << endl;
cout << "block_num_of_thread_level_block:" << block_num_of_thread_level_block << endl;
cout << "nz_index:" << nz_index << endl;
cout << "block_index:" << block_index << endl;
cout << "size_of_val_arr_after_padding:" << size_of_val_arr_after_padding << endl;
}
assert(dest_index_of_this_nz < size_of_val_arr_after_padding);
// 将内容写入
write_to_array_with_data_type(new_template->col_index_arr, data_type_of_col_index_arr_after_padding, dest_index_of_this_nz, col_index);
write_double_to_array_with_data_type(new_template->val_arr, data_type_of_val_arr_after_padding, dest_index_of_this_nz, val);
}
}
}
// 返回对应指针
return new_template;
}
// Check whether dense block `dense_block_id` of `matrix` can be executed by the
// shared-memory warp-compress template. The block qualifies only if:
//  - TLBs (thread-level blocks) have no per-block row counts (row_number_of_block_arr is NULL),
//  - no two BLBs (thread-block-level blocks) share a row,
//  - all TLBs inside one BLB have the same size,
//  - at least one row is covered by more than one TLB (otherwise a simpler template fits),
//  - the largest TLB count per BLB fits into the configured shared-memory budget.
// Returns true when all conditions hold, false otherwise.
bool is_supported_by_shared_memory_template_warp_compress(sparse_struct_t *matrix, unsigned long dense_block_id)
{
    assert(dense_block_id < matrix->block_coor_table.item_arr.size());
    assert(matrix->block_coor_table.item_arr[dense_block_id] != NULL);
    assert(matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr != NULL);
    assert(matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->read_index.size() == 7);
    compressed_block_t *compressed_block_view = matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr;
    // Indices 2/3/4 of read_index hold the thread-block / warp / thread level metadata.
    index_of_compress_block_t *block_level_index = compressed_block_view->read_index[2];
    index_of_compress_block_t *warp_level_index = compressed_block_view->read_index[3];
    index_of_compress_block_t *thread_level_index = compressed_block_view->read_index[4];
    assert(block_level_index->level_of_this_index == TBLOCK_LEVEL);
    assert(warp_level_index->level_of_this_index == WRAP_LEVEL);
    assert(thread_level_index->level_of_this_index == THREAD_LEVEL);
    assert(matrix->block_coor_table.item_arr[dense_block_id]->max_dense_row_index <= block_level_index->max_row_index);
    assert(block_level_index->max_row_index == matrix->block_coor_table.item_arr[dense_block_id]->compressed_block_ptr->read_index[0]->max_row_index);
    // Per-TLB row counts are incompatible with this template.
    if (thread_level_index->row_number_of_block_arr != NULL)
    {
        return false;
    }
    unsigned long total_row_num = block_level_index->max_row_index - block_level_index->min_row_index + 1;
    // Records whether any row is handled by more than one TLB; if not, this template is rejected below.
    bool is_many_TLB_one_row = false;
    // Global row index of the previous TLB (valid once is_first_TLB is false).
    bool is_first_TLB = true;
    unsigned long last_TLB_global_row_index = 0;
    // Largest number of TLBs observed inside a single BLB.
    unsigned long max_TLB_num_in_BLB = 0;
    // First-row index of the previous BLB (valid once is_first_BLB is false).
    bool is_first_BLB = true;
    unsigned long last_BLB_global_row_index = 0;
    // Walk the three index levels to find: the max TLB count per BLB, whether TLBs
    // share a row, whether BLBs share a row, and whether TLB sizes inside a BLB match.
    for (unsigned long index_of_block_level_index = 0; index_of_block_level_index < block_level_index->block_num; index_of_block_level_index++)
    {
        // cout << "index_of_block_level_index:" << index_of_block_level_index << endl;
        // First row covered by the current BLB.
        unsigned long block_first_row_index = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, index_of_block_level_index);
        if (is_first_BLB == true)
        {
            last_BLB_global_row_index = block_first_row_index;
            is_first_BLB = false;
        }
        else
        {
            if (last_BLB_global_row_index == block_first_row_index)
            {
                // Two BLBs start on the same row: the template cannot be used.
                return false;
            }
            last_BLB_global_row_index = block_first_row_index;
        }
        // First warp of this BLB and first warp of the next BLB (index_arr acts as an offset array).
        unsigned long this_block_first_warp_index = read_from_array_with_data_type(block_level_index->index_arr, block_level_index->index_data_type, index_of_block_level_index);
        unsigned long next_block_first_warp_index = read_from_array_with_data_type(block_level_index->index_arr, block_level_index->index_data_type, index_of_block_level_index + 1);
        // TLB size of the first warp of this BLB; every other warp in the BLB must match it.
        assert(this_block_first_warp_index < warp_level_index->block_num && thread_level_index->coo_block_size_arr != NULL);
        unsigned long this_block_first_warp_thread_block_size = read_from_array_with_data_type(thread_level_index->coo_block_size_arr, thread_level_index->data_type_of_coo_block_size_arr, this_block_first_warp_index);
        assert(this_block_first_warp_thread_block_size != 0);
        // First TLB index of this BLB and of the next BLB.
        assert(this_block_first_warp_index < warp_level_index->length);
        assert(next_block_first_warp_index < warp_level_index->length);
        unsigned long this_block_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, this_block_first_warp_index);
        unsigned long next_block_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, next_block_first_warp_index);
        assert(this_block_first_thread_index < thread_level_index->block_num && next_block_first_thread_index <= thread_level_index->block_num);
        assert(next_block_first_thread_index > this_block_first_thread_index);
        unsigned long cur_TLB_num_in_BLB = next_block_first_thread_index - this_block_first_thread_index;
        if (max_TLB_num_in_BLB < cur_TLB_num_in_BLB)
        {
            max_TLB_num_in_BLB = cur_TLB_num_in_BLB;
        }
        // Walk the warp level of this BLB.
        for (unsigned long index_of_warp_level_index = this_block_first_warp_index; index_of_warp_level_index < next_block_first_warp_index; index_of_warp_level_index++)
        {
            assert(index_of_warp_level_index < warp_level_index->block_num);
            unsigned long warp_first_row_index = read_from_array_with_data_type(warp_level_index->index_of_the_first_row_arr, warp_level_index->data_type_of_index_of_the_first_row_arr, index_of_warp_level_index);
            unsigned long this_warp_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, index_of_warp_level_index);
            unsigned long next_warp_first_thread_index = read_from_array_with_data_type(warp_level_index->index_arr, warp_level_index->index_data_type, index_of_warp_level_index + 1);
            // TLB size of this warp.
            unsigned long this_warp_thread_level_block_size = read_from_array_with_data_type(thread_level_index->coo_block_size_arr, thread_level_index->data_type_of_coo_block_size_arr, index_of_warp_level_index);
            // TLB size must be uniform across all warps of the BLB; reject otherwise.
            if (this_block_first_warp_thread_block_size != this_warp_thread_level_block_size)
            {
                return false;
            }
            for (unsigned long index_of_thread_level_index = this_warp_first_thread_index; index_of_thread_level_index < next_warp_first_thread_index; index_of_thread_level_index++)
            {
                // assert(index_of_thread_level_index < thread_level_index->block_num);
                if (index_of_thread_level_index >= thread_level_index->block_num)
                {
                    assert(false);
                }
                unsigned long thread_first_row_index = read_from_array_with_data_type(thread_level_index->index_of_the_first_row_arr, thread_level_index->data_type_of_index_of_the_first_row_arr, index_of_thread_level_index);
                // Row covered by this TLB in global coordinates: the per-level first-row
                // indices are relative offsets and sum to the absolute row.
                unsigned long global_thread_first_row_index = block_first_row_index + warp_first_row_index + thread_first_row_index;
                if (is_first_TLB == true)
                {
                    // Seed the TLB row-history with the first TLB's row.
                    last_TLB_global_row_index = global_thread_first_row_index;
                    is_first_TLB = false;
                }
                else
                {
                    // Not the first TLB: compare against the previous one to detect row sharing.
                    if (last_TLB_global_row_index == global_thread_first_row_index)
                    {
                        is_many_TLB_one_row = true;
                    }
                    last_TLB_global_row_index = global_thread_first_row_index;
                }
                assert(global_thread_first_row_index < total_row_num);
            }
        }
    }
    // Scan the row ranges of all BLBs; any overlap would require atomic adds to
    // reduce results in global memory, which this template does not support.
    // Track the running [min,max] row range seen so far.
    unsigned long global_min_row_index = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, 0);
    unsigned long global_max_row_index = global_min_row_index + read_from_array_with_data_type(block_level_index->row_number_of_block_arr, block_level_index->data_type_of_row_number_of_block_arr, 0) - 1;
    for (unsigned long index_of_block_level_index = 1; index_of_block_level_index < block_level_index->block_num; index_of_block_level_index++)
    {
        // Starting row and row count of the current BLB.
        unsigned long cur_min_row_index = read_from_array_with_data_type(block_level_index->index_of_the_first_row_arr, block_level_index->data_type_of_index_of_the_first_row_arr, index_of_block_level_index);
        if (cur_min_row_index <= global_max_row_index)
        {
            // Overlap means one row may be handled by several thread blocks: unsupported.
            return false;
        }
        unsigned long cur_row_num = read_from_array_with_data_type(block_level_index->row_number_of_block_arr, block_level_index->data_type_of_row_number_of_block_arr, index_of_block_level_index);
        if (cur_row_num + cur_min_row_index - 1 > global_max_row_index)
        {
            // No overlap: extend the covered range.
            global_max_row_index = cur_min_row_index + cur_row_num - 1;
        }
    }
    // If every row is handled by at most one TLB (one thread per row), reject:
    // a simpler template is the better fit.
    if (is_many_TLB_one_row == false)
    {
        return false;
    }
    // Reject if the per-BLB TLB count exceeds the shared-memory budget
    // (SHARED_MEM_TOTAL_SIZE minus a 50-slot reserve).
    if (max_TLB_num_in_BLB > get_config()["SHARED_MEM_TOTAL_SIZE"].as_integer() - 50)
    {
        return false;
    }
    return true;
}
// 共享内存不溢出,不能一个线程对应一行,一行不能被多个线程块处理,一个BLB中不同TLB的大小保持一致
// Builder-level convenience overload: validates the builder chain and delegates
// the actual support check to the matrix-level overload above.
bool is_supported_by_shared_memory_template_warp_compress(code_builder_t *builder, unsigned long dense_block_id)
{
    // The builder must carry a fully-initialized op_manager/matrix chain.
    assert(builder != NULL);
    assert(builder->op_manager != NULL);
    assert(builder->op_manager->matrix != NULL);
    // Forward directly; no local copy of the matrix pointer is needed.
    return is_supported_by_shared_memory_template_warp_compress(builder->op_manager->matrix, dense_block_id);
}
// Persist the arrays of a shared-memory warp-compress template into a freshly
// created directory "<output_dir>/<matrix_id>_<device_id>", so the generated
// kernel code can later reload them with read_arr_from_file_with_data_type.
// Only arrays whose compression mode is NONE_COMPRESS are written out; compressed
// arrays are reconstructed analytically by the generated code. The sorted-row
// index is written only for locally-sorted templates, or for globally-sorted
// ones when this is dense block 0 (or sharing is explicitly disabled via
// force_not_share_global_sort_index). On return, output_template->hash_of_this_template
// holds the generated matrix_id.
void store_template_data(shared_memory_template_warp_compress_t *output_template, string output_dir, bool force_not_share_global_sort_index)
{
    srand(time(0));
    // NOTE(review): operator precedence makes this rand() + (time(0) % 1000),
    // not (rand() + time(0)) % 1000 — kept as-is for compatibility, but the id
    // is only pseudo-unique across runs.
    unsigned long matrix_id = rand() + time(0) % 1000;
    // Directory that holds all data files of this template instance.
    output_dir = output_dir + "/" + to_string(matrix_id) + "_" + to_string(get_config()["DEFAULT_DEVICE_ID"].as_integer());
    // Create the directory and fail loudly if the shell could not be spawned or
    // mkdir reported an error (previously the return value was silently dropped).
    int mkdir_status = system(("mkdir " + output_dir).c_str());
    assert(mkdir_status == 0);
    // Persist only when the array is stored uncompressed.
    if (output_template->row_offset_in_thread_tmp_result_compress == NONE_COMPRESS)
    {
        assert(output_template->row_offset_in_thread_tmp_result != NULL);
        print_arr_to_file_with_data_type(output_template->row_offset_in_thread_tmp_result, output_template->data_type_of_row_offset_in_thread_tmp_result, output_template->size_of_row_offset_in_thread_tmp_result, output_dir + "/row_offset_in_thread_tmp_result");
    }
    if (output_template->block_first_row_index_compress == NONE_COMPRESS)
    {
        assert(output_template->block_first_row_index != NULL);
        print_arr_to_file_with_data_type(output_template->block_first_row_index, output_template->data_type_of_block_first_row_index, output_template->size_of_block_first_row_index, output_dir + "/block_first_row_index");
    }
    if (output_template->block_begin_thread_index_offset_compress == NONE_COMPRESS)
    {
        assert(output_template->block_begin_thread_index_offset != NULL);
        print_arr_to_file_with_data_type(output_template->block_begin_thread_index_offset, output_template->data_type_of_block_begin_thread_index_offset, output_template->size_of_block_begin_thread_index_offset, output_dir + "/block_begin_thread_index_offset");
    }
    if (output_template->thread_block_size_in_block_compress == NONE_COMPRESS)
    {
        assert(output_template->thread_block_size_in_block != NULL);
        print_arr_to_file_with_data_type(output_template->thread_block_size_in_block, output_template->data_type_of_thread_block_size_in_block, output_template->size_of_thread_block_size_in_block, output_dir + "/thread_block_size_in_block");
    }
    if (output_template->row_index_before_sort_compress == NONE_COMPRESS && output_template->row_index_before_sort != NULL)
    {
        assert(output_template->row_index_before_sort != NULL);
        // For a global sort the index is shared: only dense block 0 stores it
        // (unless sharing is disabled); a local sort always stores its own copy.
        if (output_template->local_sort_index == true)
        {
            assert(output_template->global_sort_index == false);
            print_arr_to_file_with_data_type(output_template->row_index_before_sort, output_template->data_type_of_row_index_before_sort, output_template->size_of_row_index_before_sort, output_dir + "/row_index_before_sort");
        }
        else if (output_template->global_sort_index == true && (output_template->dense_block_index == 0 || force_not_share_global_sort_index == true))
        {
            assert(output_template->local_sort_index == false);
            print_arr_to_file_with_data_type(output_template->row_index_before_sort, output_template->data_type_of_row_index_before_sort, output_template->size_of_row_index_before_sort, output_dir + "/row_index_before_sort");
        }
    }
    if (output_template->block_nz_begin_offset_compress == NONE_COMPRESS)
    {
        assert(output_template->block_nz_begin_offset != NULL);
        print_arr_to_file_with_data_type(output_template->block_nz_begin_offset, output_template->data_type_of_block_nz_begin_offset, output_template->size_of_block_nz_begin_offset, output_dir + "/block_nz_begin_offset");
    }
    // Values are always stored.
    assert(output_template->val_arr != NULL);
    print_arr_to_file_with_data_type(output_template->val_arr, output_template->data_type_of_val_arr, output_template->size_of_val_arr, output_dir + "/val_arr");
    // Column indices are always stored.
    assert(output_template->col_index_arr != NULL);
    print_arr_to_file_with_data_type(output_template->col_index_arr, output_template->data_type_of_col_index_arr, output_template->size_of_col_index_arr, output_dir + "/col_index_arr");
    output_template->hash_of_this_template = matrix_id;
}
// Generate the C struct definition ("compressed_dense_block_<id>_t") that holds
// the template's arrays and their element counts. For each array stored
// uncompressed, two members are emitted: the data pointer and an unsigned long
// "size_of_..." member initialized to the array's length. val_arr and
// col_index_arr are always present. Returns the struct definition as a string.
string code_of_template_data_struct(shared_memory_template_warp_compress_t *output_template, unsigned long dense_block_id)
{
    string return_str = "typedef struct compressed_dense_block_" + to_string(dense_block_id) + "\n{\n";
    // Helper: emit one array member (pointer declaration + size member) — this
    // replaces six copy-pasted stanzas with identical output.
    auto append_arr_member = [&return_str, dense_block_id](data_type dt, const string &field, unsigned long size)
    {
        return_str = return_str + code_line_of_pointer_define(dt, code_of_arr_var_name(dense_block_id, -1, field));
        return_str = return_str + code_of_data_type(UNSIGNED_LONG) + " size_of_" + code_of_arr_var_name(dense_block_id, -1, field) + " = " + to_string(size) + ";\n";
    };
    // Per-thread temporary-result row offsets (only when stored uncompressed).
    if (output_template->row_offset_in_thread_tmp_result_compress == NONE_COMPRESS)
    {
        assert(output_template->row_offset_in_thread_tmp_result != NULL);
        append_arr_member(output_template->data_type_of_row_offset_in_thread_tmp_result, "row_offset_in_thread_tmp_result", output_template->size_of_row_offset_in_thread_tmp_result);
    }
    return_str = return_str + "\n";
    if (output_template->block_first_row_index_compress == NONE_COMPRESS)
    {
        assert(output_template->block_first_row_index != NULL);
        append_arr_member(output_template->data_type_of_block_first_row_index, "block_first_row_index", output_template->size_of_block_first_row_index);
    }
    return_str = return_str + "\n";
    if (output_template->block_begin_thread_index_offset_compress == NONE_COMPRESS)
    {
        assert(output_template->block_begin_thread_index_offset != NULL);
        append_arr_member(output_template->data_type_of_block_begin_thread_index_offset, "block_begin_thread_index_offset", output_template->size_of_block_begin_thread_index_offset);
    }
    return_str = return_str + "\n";
    if (output_template->thread_block_size_in_block_compress == NONE_COMPRESS)
    {
        assert(output_template->thread_block_size_in_block != NULL);
        append_arr_member(output_template->data_type_of_thread_block_size_in_block, "thread_block_size_in_block", output_template->size_of_thread_block_size_in_block);
    }
    return_str = return_str + "\n";
    if (output_template->row_index_before_sort_compress == NONE_COMPRESS && output_template->row_index_before_sort != NULL)
    {
        assert(output_template->row_index_before_sort != NULL);
        append_arr_member(output_template->data_type_of_row_index_before_sort, "row_index_before_sort", output_template->size_of_row_index_before_sort);
    }
    return_str = return_str + "\n";
    if (output_template->block_nz_begin_offset_compress == NONE_COMPRESS)
    {
        assert(output_template->block_nz_begin_offset != NULL);
        append_arr_member(output_template->data_type_of_block_nz_begin_offset, "block_nz_begin_offset", output_template->size_of_block_nz_begin_offset);
    }
    return_str = return_str + "\n";
    return_str = return_str + "\n";
    // Value and column-index arrays are unconditional.
    assert(output_template->val_arr != NULL);
    append_arr_member(output_template->data_type_of_val_arr, "val_arr", output_template->size_of_val_arr);
    return_str = return_str + "\n";
    assert(output_template->col_index_arr != NULL);
    append_arr_member(output_template->data_type_of_col_index_arr, "col_index_arr", output_template->size_of_col_index_arr);
    return_str = return_str + "}";
    return_str = return_str + "compressed_dense_block_" + to_string(dense_block_id) + "_t;\n";
    return return_str;
}
// Generate the loader function "read_dense_block_<id>_from_file" that allocates
// a compressed_dense_block_<id>_t and fills each uncompressed array by reading
// "<file_name_prefix>/<field>" via read_arr_from_file_with_data_type. A globally
// sorted row index is only read by dense block 0 (or when sharing is disabled
// via force_not_share_global_sort_index); other blocks set the pointer to NULL
// and later reuse block 0's copy. Returns the function definition as a string.
string code_of_read_template_data_from_file_func_define(shared_memory_template_warp_compress_t *output_template, unsigned long dense_block_id, bool force_not_share_global_sort_index)
{
    string return_str = "compressed_dense_block_" + to_string(dense_block_id) + "_t* read_dense_block_" + to_string(dense_block_id) + "_from_file(string file_name_prefix)\n{\n";
    return_str = return_str + "compressed_dense_block_" + to_string(dense_block_id) + "_t *template_data = new " + "compressed_dense_block_" + to_string(dense_block_id) + "_t();\n";
    // Helper: emit one cast-and-read statement for array member `field` — this
    // replaces seven copy-pasted stanzas with identical output.
    auto append_arr_read = [&return_str, dense_block_id](data_type dt, const string &field)
    {
        return_str = return_str + "template_data->" + code_of_arr_var_name(dense_block_id, -1, field) + " = (" + code_of_data_type(dt) + " *)";
        return_str = return_str + "read_arr_from_file_with_data_type(template_data->size_of_" + code_of_arr_var_name(dense_block_id, -1, field) + ", " + convert_data_type_to_string(dt) + ", ";
        // File to read: "<file_name_prefix>/<field>".
        return_str = return_str + "file_name_prefix + \"/" + field + "\");\n";
    };
    if (output_template->row_offset_in_thread_tmp_result_compress == NONE_COMPRESS)
    {
        assert(output_template->row_offset_in_thread_tmp_result != NULL);
        append_arr_read(output_template->data_type_of_row_offset_in_thread_tmp_result, "row_offset_in_thread_tmp_result");
    }
    return_str = return_str + "\n";
    if (output_template->block_first_row_index_compress == NONE_COMPRESS)
    {
        assert(output_template->block_first_row_index != NULL);
        append_arr_read(output_template->data_type_of_block_first_row_index, "block_first_row_index");
    }
    return_str = return_str + "\n";
    if (output_template->block_begin_thread_index_offset_compress == NONE_COMPRESS)
    {
        assert(output_template->block_begin_thread_index_offset != NULL);
        append_arr_read(output_template->data_type_of_block_begin_thread_index_offset, "block_begin_thread_index_offset");
    }
    return_str = return_str + "\n";
    if (output_template->thread_block_size_in_block_compress == NONE_COMPRESS)
    {
        assert(output_template->thread_block_size_in_block != NULL);
        append_arr_read(output_template->data_type_of_thread_block_size_in_block, "thread_block_size_in_block");
    }
    return_str = return_str + "\n";
    if (output_template->row_index_before_sort_compress == NONE_COMPRESS && output_template->row_index_before_sort != NULL)
    {
        // With a global sort index only dense block 0 reads the file; other
        // blocks leave the pointer NULL and share block 0's copy at runtime.
        if (output_template->global_sort_index == true)
        {
            if (dense_block_id == 0 || force_not_share_global_sort_index == true)
            {
                assert(output_template->row_index_before_sort != NULL);
                append_arr_read(output_template->data_type_of_row_index_before_sort, "row_index_before_sort");
            }
            else
            {
                // The shared global index already exists elsewhere: emit a NULL assignment.
                return_str = return_str + "template_data->" + code_of_arr_var_name(dense_block_id, -1, "row_index_before_sort") + " = NULL;\n";
            }
        }
        else if (output_template->local_sort_index == true)
        {
            assert(output_template->row_index_before_sort != NULL);
            append_arr_read(output_template->data_type_of_row_index_before_sort, "row_index_before_sort");
        }
        else
        {
            // An uncompressed, non-NULL sorted index must be either global or local.
            cout << "error" << endl;
            assert(false);
        }
    }
    return_str = return_str + "\n";
    // First-nonzero offsets of the thread blocks.
    if (output_template->block_nz_begin_offset_compress == NONE_COMPRESS)
    {
        assert(output_template->block_nz_begin_offset != NULL);
        append_arr_read(output_template->data_type_of_block_nz_begin_offset, "block_nz_begin_offset");
    }
    return_str = return_str + "\n";
    return_str = return_str + "\n";
    // Value and column-index arrays are always read.
    assert(output_template->val_arr != NULL);
    append_arr_read(output_template->data_type_of_val_arr, "val_arr");
    return_str = return_str + "\n";
    assert(output_template->col_index_arr != NULL);
    append_arr_read(output_template->data_type_of_col_index_arr, "col_index_arr");
    return_str = return_str + "return template_data;\n";
    return_str = return_str + "}\n";
    return return_str;
}
string code_of_template_kernal(shared_memory_template_warp_compress_t *output_template, unsigned long dense_block_id)
{
if (output_template->thread_num_of_row_reduce != get_config()["HALF_MAX_ROW_REDUCE_THREAD"].as_integer() && output_template->thread_num_of_row_reduce != get_config()["MAX_ROW_REDUCE_THREAD"].as_integer())
{