Add OpenPMD support #1050
base: develop
Changes from 39 commits
In the `Params` class, a `GetMutability` accessor is added:

```diff
@@ -118,6 +118,12 @@ class Params {
     return it->second;
   }

+  const Mutability &GetMutability(const std::string &key) const {
+    auto const it = myMutable_.find(key);
+    PARTHENON_REQUIRE_THROWS(it != myMutable_.end(), "Key " + key + " doesn't exist");
+    return it->second;
+  }
+
   std::vector<std::string> GetKeys() const {
     std::vector<std::string> keys;
     for (auto &x : myParams_) {
```

Collaborator (on `GetMutability`): 👍
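For context, a minimal usage sketch of the new accessor. This is hypothetical, not code from the PR: the package pointer and key name are illustrative, and `AllParams()` is assumed to expose the `Params` instance as it does elsewhere in Parthenon.

```cpp
// Hypothetical: query how a previously added parameter may change.
// pkg is assumed to be a parthenon::StateDescriptor with populated Params.
parthenon::Params &params = pkg->AllParams();
// Throws via PARTHENON_REQUIRE_THROWS if "cfl" was never added.
const auto &mut = params.GetMutability("cfl");
```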
In the output utils source, new includes and a gather helper are added:

```diff
@@ -15,6 +15,7 @@
 // the public, perform publicly and display publicly, and to permit others to do so.
 //========================================================================================

+#include <cstdint>
 #include <map>
 #include <set>
 #include <string>
@@ -29,6 +30,7 @@
 #include "mesh/mesh.hpp"
 #include "mesh/meshblock.hpp"
 #include "outputs/output_utils.hpp"
+#include "utils/mpi_types.hpp"

 namespace parthenon {
 namespace OutputUtils {
@@ -241,6 +243,45 @@ std::vector<int> ComputeIDsAndFlags(Mesh *pm) {
   });
 }

+template <typename T>
+std::vector<T> FlattendedLocalToGlobal(Mesh *pm, const std::vector<T> &data_local) {
+  const int n_blocks_global = pm->nbtotal;
+  const int n_blocks_local = static_cast<int>(pm->block_list.size());
+
+  const int n_elem = data_local.size() / n_blocks_local;
+  PARTHENON_REQUIRE_THROWS(data_local.size() % n_blocks_local == 0,
+                           "Results from flattened input vector does not evenly divide "
+                           "into number of local blocks.");
+  std::vector<T> data_global(n_elem * n_blocks_global);
+
+  std::vector<int> counts(Globals::nranks);
+  std::vector<int> offsets(Globals::nranks);
+
+  const auto &nblist = pm->GetNbList();
+  counts[0] = n_elem * nblist[0];
+  offsets[0] = 0;
+  for (int r = 1; r < Globals::nranks; r++) {
+    counts[r] = n_elem * nblist[r];
+    offsets[r] = offsets[r - 1] + counts[r - 1];
+  }
+
+#ifdef MPI_PARALLEL
+  PARTHENON_MPI_CHECK(MPI_Allgatherv(data_local.data(), counts[Globals::my_rank],
+                                     MPITypeMap<T>::type(), data_global.data(),
+                                     counts.data(), offsets.data(),
+                                     MPITypeMap<T>::type(), MPI_COMM_WORLD));
+#else
+  return data_local;
+#endif
+  return data_global;
+}
+
+// explicit template instantiation
+template std::vector<int64_t>
+FlattendedLocalToGlobal(Mesh *pm, const std::vector<int64_t> &data_local);
+template std::vector<int> FlattendedLocalToGlobal(Mesh *pm,
+                                                  const std::vector<int> &data_local);
+
 // TODO(JMM): I could make this use the other loop
 // functionality/high-order functions. but it was more code than this
 // for, I think, little benefit.
```

Collaborator (on `FlattendedLocalToGlobal`): I don't understand what this function does. Is it actually doing an MPI all-to-all to build up a global data vector? Is this something we ever want to do?

Collaborator (on lines 341 to 349, the explicit instantiations): Should this be templated if we only instantiated it for int64?
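On the reviewer's question above: the function is an all-gather rather than an all-to-all; after the call, every rank holds the concatenation of all ranks' flattened block data, ordered by rank. A self-contained sketch of the same `MPI_Allgatherv` pattern (illustrative names only, not code from this PR):

```cpp
#include <mpi.h>

#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, nranks;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nranks);

  // Pretend rank r owns (r + 1) blocks with n_elem entries each, mirroring
  // the per-rank block counts (nblist) used above.
  const int n_elem = 2;
  std::vector<int> nblist(nranks);
  for (int r = 0; r < nranks; r++) nblist[r] = r + 1;

  // Local flattened data: every entry tagged with the owning rank.
  std::vector<int> local(n_elem * nblist[rank], rank);

  // counts[r]/offsets[r]: how many elements rank r contributes and where
  // they land in the global vector (the same bookkeeping as the PR).
  std::vector<int> counts(nranks), offsets(nranks);
  counts[0] = n_elem * nblist[0];
  offsets[0] = 0;
  for (int r = 1; r < nranks; r++) {
    counts[r] = n_elem * nblist[r];
    offsets[r] = offsets[r - 1] + counts[r - 1];
  }

  std::vector<int> global(offsets[nranks - 1] + counts[nranks - 1]);
  MPI_Allgatherv(local.data(), counts[rank], MPI_INT, global.data(),
                 counts.data(), offsets.data(), MPI_INT, MPI_COMM_WORLD);

  if (rank == 0) { // the result is identical on every rank
    for (int v : global) std::printf("%d ", v);
    std::printf("\n");
  }
  MPI_Finalize();
  return 0;
}
```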
Later in the same file, two helpers for selecting and describing output variables are added:

```diff
@@ -313,5 +354,34 @@ std::size_t MPISum(std::size_t val) {
   return val;
 }

+VariableVector<Real> GetVarsToWrite(const std::shared_ptr<MeshBlock> pmb,
+                                    const bool restart,
+                                    const std::vector<std::string> &variables) {
+  const auto &var_vec = pmb->meshblock_data.Get()->GetVariableVector();
+  auto vars_to_write = GetAnyVariables(var_vec, variables);
+  if (restart) {
+    // get all vars with flag Independent OR restart
+    auto restart_vars = GetAnyVariables(
+        var_vec, {parthenon::Metadata::Independent, parthenon::Metadata::Restart});
+    for (auto restart_var : restart_vars) {
+      vars_to_write.emplace_back(restart_var);
+    }
+  }
+  return vars_to_write;
+}
+
+std::vector<VarInfo> GetAllVarsInfo(const VariableVector<Real> &vars,
+                                    const IndexShape &cellbounds) {
+  std::vector<VarInfo> all_vars_info;
+  for (auto &v : vars) {
+    all_vars_info.emplace_back(v, cellbounds);
+  }
+
+  // sort alphabetically
+  std::sort(all_vars_info.begin(), all_vars_info.end(),
+            [](const VarInfo &a, const VarInfo &b) { return a.label < b.label; });
+  return all_vars_info;
+}
+
 } // namespace OutputUtils
 } // namespace parthenon
```
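A hedged sketch of how an output writer might chain the two helpers; the requested-variable list and the loop below are illustrative, not taken from this PR:

```cpp
// Illustrative: collect sorted variable metadata for a non-restart output.
const std::vector<std::string> requested = {"density", "velocity"};
for (auto &pmb : pm->block_list) {
  auto vars =
      parthenon::OutputUtils::GetVarsToWrite(pmb, /*restart=*/false, requested);
  // Sorted alphabetically by label, so the file layout is deterministic.
  auto infos = parthenon::OutputUtils::GetAllVarsInfo(vars, pmb->cellbounds);
}
```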
The corresponding declarations land in the output utils header:

```diff
@@ -339,13 +339,29 @@ std::vector<Real> ComputeXminBlocks(Mesh *pm);
 std::vector<int64_t> ComputeLocs(Mesh *pm);
 std::vector<int> ComputeIDsAndFlags(Mesh *pm);

+// Takes a vector containing the flattened data of all rank-local blocks and
+// returns the flattened data over all blocks.
+template <typename T>
+std::vector<T> FlattendedLocalToGlobal(Mesh *pm, const std::vector<T> &data_local);
+
 // TODO(JMM): Potentially unsafe if MPI_UNSIGNED_LONG_LONG isn't a size_t
 // however I think it's probably safe to assume we'll be on systems
 // where this is the case?
 // TODO(JMM): If we ever need non-int need to generalize
 std::size_t MPIPrefixSum(std::size_t local, std::size_t &tot_count);
 std::size_t MPISum(std::size_t local);

+// Return all variables to write, i.e., for restarts all independent variables
+// and variables with an explicit Restart flag, plus any variables explicitly
+// requested for output in the input file.
+VariableVector<Real> GetVarsToWrite(const std::shared_ptr<MeshBlock> pmb,
+                                    const bool restart,
+                                    const std::vector<std::string> &variables);
+
+// Returns a sorted vector of VarInfo associated with vars
+std::vector<VarInfo> GetAllVarsInfo(const VariableVector<Real> &vars,
+                                    const IndexShape &cellbounds);
+
 } // namespace OutputUtils
 } // namespace parthenon
```

Collaborator (on lines +383 to +392, the new declarations): Can these two functions be unified with the HDF5 machinery? I actually thought I already wrote

Collaborator (author): Yes, I think we wrote them in parallel.
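On the MPI_UNSIGNED_LONG_LONG TODO above: one defensive option, sketched here as a suggestion rather than code from this PR, is to turn the assumption into a compile-time check next to the declarations:

```cpp
#include <cstddef>
#include <type_traits>

// Fail the build on platforms where the assumption behind MPIPrefixSum/MPISum
// breaks, i.e. where MPI_UNSIGNED_LONG_LONG would not match std::size_t.
static_assert(sizeof(std::size_t) == sizeof(unsigned long long) &&
                  std::is_unsigned_v<std::size_t>,
              "MPIPrefixSum/MPISum assume std::size_t matches unsigned long long");
```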
In the HDF5 writer, the per-variable search loop is replaced by a direct lookup:

```diff
@@ -357,22 +357,20 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm
       const auto &pmb = pm->block_list[b_idx];
       bool is_allocated = false;

-      // for each variable that this local meshblock actually has
-      const auto vars = get_vars(pmb);
-      for (auto &v : vars) {
-        // For reference, if we update the logic here, there's also
-        // a similar block in parthenon_manager.cpp
-        if (v->IsAllocated() && (var_name == v->label())) {
-          auto v_h = v->data.GetHostMirrorAndCopy();
-          OutputUtils::PackOrUnpackVar(
-              vinfo, output_params.include_ghost_zones, index,
-              [&](auto index, int topo, int t, int u, int v, int k, int j, int i) {
-                tmpData[index] = static_cast<OutT>(v_h(topo, t, u, v, k, j, i));
-              });
-
-          is_allocated = true;
-          break;
-        }
-      }
+      // TODO(reviewers) Why was the loop originally there? Does the direct Get
+      // cause issues?
+      auto v = pmb->meshblock_data.Get()->GetVarPtr(var_name);
+      // For reference, if we update the logic here, there's also
+      // a similar block in parthenon_manager.cpp
+      if (v->IsAllocated() && (var_name == v->label())) {
+        auto v_h = v->data.GetHostMirrorAndCopy();
+        OutputUtils::PackOrUnpackVar(
+            vinfo, output_params.include_ghost_zones, index,
+            [&](auto index, int topo, int t, int u, int v, int k, int j, int i) {
+              tmpData[index] = static_cast<OutT>(v_h(topo, t, u, v, k, j, i));
+            });
+
+        is_allocated = true;
+      }

       if (vinfo.is_sparse) {
```

Collaborator (on lines +397 to +398, the TODO): I'm not sure; it may have just been crazy code that no one tried to change.