Skip to content

Commit 86c5289

Browse files
Fix MPI type mistakes reported by oneAPI
1 parent: 58831a2 · commit: 86c5289

File tree

12 files changed

+47
-46
lines changed

12 files changed

+47
-46
lines changed

src/distributed_io.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -955,7 +955,7 @@ void write_output_distributed(struct engine* e,
955955
}
956956

957957
/* Compute offset in the file and total number of particles */
958-
const long long N[swift_type_count] = {
958+
long long N[swift_type_count] = {
959959
Ngas_written, Ndm_written, Ndm_background, Nsinks_written,
960960
Nstars_written, Nblackholes_written, Ndm_neutrino};
961961

src/engine.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -326,7 +326,7 @@ void engine_repartition_trigger(struct engine *e) {
326326
e->usertime_last_step, e->systime_last_step, (double)resident,
327327
e->local_deadtime / (e->nr_threads * e->wallclock_time)};
328328
double timemems[e->nr_nodes * 4];
329-
MPI_Gather(&timemem, 4, MPI_DOUBLE, timemems, 4, MPI_DOUBLE, 0,
329+
MPI_Gather(timemem, 4, MPI_DOUBLE, timemems, 4, MPI_DOUBLE, 0,
330330
MPI_COMM_WORLD);
331331
if (e->nodeID == 0) {
332332

@@ -1285,7 +1285,7 @@ void engine_rebuild(struct engine *e, const int repartitioned,
12851285
MPI_COMM_WORLD);
12861286
MPI_Allreduce(MPI_IN_PLACE, &e->s->max_softening, 1, MPI_FLOAT, MPI_MAX,
12871287
MPI_COMM_WORLD);
1288-
MPI_Allreduce(MPI_IN_PLACE, &e->s->max_mpole_power,
1288+
MPI_Allreduce(MPI_IN_PLACE, e->s->max_mpole_power,
12891289
SELF_GRAVITY_MULTIPOLE_ORDER + 1, MPI_FLOAT, MPI_MAX,
12901290
MPI_COMM_WORLD);
12911291
#endif

src/fof.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4146,7 +4146,7 @@ void fof_compute_group_props(struct fof_props *props,
41464146
/* Dump group data. */
41474147
if (dump_results) {
41484148
#ifdef HAVE_HDF5
4149-
write_fof_hdf5_catalogue(props, num_groups_local, s->e);
4149+
write_fof_hdf5_catalogue(props, (long long)num_groups_local, s->e);
41504150
#else
41514151
error("Can't dump hdf5 catalogues with hdf5 switched off!");
41524152
#endif

src/fof_catalogue_io.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -506,7 +506,7 @@ void write_fof_hdf5_array(
506506
}
507507

508508
void write_fof_hdf5_catalogue(const struct fof_props* props,
509-
const size_t num_groups, const struct engine* e) {
509+
long long num_groups, const struct engine* e) {
510510

511511
char file_name[512];
512512
#ifdef WITH_MPI

src/fof_catalogue_io.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
#ifdef WITH_FOF
2626

2727
void write_fof_hdf5_catalogue(const struct fof_props *props,
28-
const size_t num_groups, const struct engine *e);
28+
long long num_groups, const struct engine *e);
2929

3030
#endif /* WITH_FOF */
3131

src/neutrino/Default/neutrino.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -215,7 +215,7 @@ void compute_neutrino_diagnostics(
215215
ppi_sum, mass_sum, weight2_sum};
216216
double total_sums[7];
217217

218-
MPI_Reduce(&sums, &total_sums, 7, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
218+
MPI_Reduce(sums, total_sums, 7, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
219219

220220
double total_p = total_sums[0];
221221
double total_p2 = total_sums[1];

src/parallel_io.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1610,7 +1610,7 @@ void write_output_parallel(struct engine* e,
16101610
}
16111611

16121612
/* Compute offset in the file and total number of particles */
1613-
size_t N[swift_type_count] = {
1613+
long long N[swift_type_count] = {
16141614
Ngas_written, Ndm_written, Ndm_background, Nsinks_written,
16151615
Nstars_written, Nblackholes_written, Ndm_neutrino};
16161616
long long N_total[swift_type_count] = {0};

src/space.c

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1664,8 +1664,8 @@ void space_remap_ids(struct space *s, int nr_nodes, int verbose) {
16641664

16651665
if (verbose) message("Remapping all the IDs");
16661666

1667-
size_t local_nr_dm_background = 0;
1668-
size_t local_nr_nuparts = 0;
1667+
long long local_nr_dm_background = 0;
1668+
long long local_nr_nuparts = 0;
16691669
for (size_t i = 0; i < s->nr_gparts; ++i) {
16701670
if (s->gparts[i].type == swift_type_neutrino)
16711671
local_nr_nuparts++;
@@ -1674,17 +1674,17 @@ void space_remap_ids(struct space *s, int nr_nodes, int verbose) {
16741674
}
16751675

16761676
/* Get the current local number of particles */
1677-
const size_t local_nr_parts = s->nr_parts;
1678-
const size_t local_nr_sinks = s->nr_sinks;
1679-
const size_t local_nr_gparts = s->nr_gparts;
1680-
const size_t local_nr_sparts = s->nr_sparts;
1681-
const size_t local_nr_bparts = s->nr_bparts;
1682-
const size_t local_nr_baryons =
1677+
long long local_nr_parts = s->nr_parts;
1678+
long long local_nr_sinks = s->nr_sinks;
1679+
long long local_nr_gparts = s->nr_gparts;
1680+
long long local_nr_sparts = s->nr_sparts;
1681+
long long local_nr_bparts = s->nr_bparts;
1682+
long long local_nr_baryons =
16831683
local_nr_parts + local_nr_sinks + local_nr_sparts + local_nr_bparts;
1684-
const size_t local_nr_dm = local_nr_gparts > 0
1685-
? local_nr_gparts - local_nr_baryons -
1686-
local_nr_nuparts - local_nr_dm_background
1687-
: 0;
1684+
long long local_nr_dm = local_nr_gparts > 0
1685+
? local_nr_gparts - local_nr_baryons -
1686+
local_nr_nuparts - local_nr_dm_background
1687+
: 0;
16881688

16891689
/* Get the global offsets */
16901690
long long offset_parts = 0;
@@ -1750,21 +1750,21 @@ void space_remap_ids(struct space *s, int nr_nodes, int verbose) {
17501750
total_bparts + total_nuparts);
17511751

17521752
/* We can now remap the IDs in the range [offset offset + local_nr] */
1753-
for (size_t i = 0; i < local_nr_parts; ++i) {
1753+
for (long long i = 0; i < local_nr_parts; ++i) {
17541754
s->parts[i].id = offset_parts + i;
17551755
}
1756-
for (size_t i = 0; i < local_nr_sinks; ++i) {
1756+
for (long long i = 0; i < local_nr_sinks; ++i) {
17571757
s->sinks[i].id = offset_sinks + i;
17581758
}
1759-
for (size_t i = 0; i < local_nr_sparts; ++i) {
1759+
for (long long i = 0; i < local_nr_sparts; ++i) {
17601760
s->sparts[i].id = offset_sparts + i;
17611761
}
1762-
for (size_t i = 0; i < local_nr_bparts; ++i) {
1762+
for (long long i = 0; i < local_nr_bparts; ++i) {
17631763
s->bparts[i].id = offset_bparts + i;
17641764
}
1765-
size_t count_dm = 0;
1766-
size_t count_dm_background = 0;
1767-
size_t count_nu = 0;
1765+
long long count_dm = 0;
1766+
long long count_dm_background = 0;
1767+
long long count_nu = 0;
17681768
for (size_t i = 0; i < s->nr_gparts; ++i) {
17691769
if (s->gparts[i].type == swift_type_dark_matter) {
17701770
s->gparts[i].id_or_neg_offset = offset_dm + count_dm;

src/space_unique_id.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ void space_update_unique_id(struct space *s) {
4848
return;
4949
}
5050

51-
const int require_new_batch = s->unique_id.next_batch.current == 0;
51+
int require_new_batch = s->unique_id.next_batch.current == 0;
5252

5353
#ifdef WITH_MPI
5454
const struct engine *e = s->e;

src/task.c

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1575,32 +1575,33 @@ void task_dump_stats(const char *dumpfile, struct engine *e,
15751575
/* Get these from all ranks for output from rank 0. Could wrap these into a
15761576
* single operation. */
15771577
size_t size = task_type_count * task_subtype_count;
1578-
int res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : sum), sum, size,
1579-
MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
1578+
int res =
1579+
MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &sum[0][0]), &sum[0][0],
1580+
size, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
15801581
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task sums");
15811582

1582-
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : tsum), tsum, size,
1583-
MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
1583+
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &tsum[0][0]),
1584+
&tsum[0][0], size, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
15841585
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task tsums");
15851586

1586-
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : count), count, size,
1587-
MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
1587+
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &count[0][0]),
1588+
&count[0][0], size, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
15881589
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task counts");
15891590

1590-
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : min), min, size,
1591-
MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
1591+
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &min[0][0]), &min[0][0],
1592+
size, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
15921593
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task minima");
15931594

1594-
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : tmin), tmin, size,
1595-
MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
1595+
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &tmin[0][0]),
1596+
&tmin[0][0], size, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
15961597
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task minima");
15971598

1598-
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : max), max, size,
1599-
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
1599+
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &max[0][0]), &max[0][0],
1600+
size, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
16001601
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task maxima");
16011602

1602-
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : tmax), tmax, size,
1603-
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
1603+
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : &tmax[0][0]),
1604+
&tmax[0][0], size, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
16041605
if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task maxima");
16051606

16061607
res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : total), total, 1,

0 commit comments

Comments (0)