14 | 14 | #include <experimental/filesystem> |
15 | 15 | namespace fs = std::experimental::filesystem; |
16 | 16 | #else |
17 | | - error "Missing the <filesystem> header." |
| 17 | + #error "Missing the <filesystem> header." |
18 | 18 | #endif |
19 | 19 | #include <iomanip> |
20 | 20 | #include "dump.hpp" |
@@ -62,6 +62,97 @@ void Dump::RegisterVariable(IdefixHostArray4D<real>& in, |
62 | 62 | } |
63 | 63 |
64 | 64 |
| 65 | + |
| 66 | +void Dump::CreateMPIDataType(GridBox gb, bool read) { |
| 67 | + #ifdef WITH_MPI |
| 68 | + int start[3]; |
| 69 | + int size[3]; |
| 70 | + int subsize[3]; |
| 71 | + |
| 72 | + // the grid is required to know the current MPI domain decomposition |
| 73 | + Grid *grid = data->mygrid; |
| 74 | + |
| 75 | + // Dimensions for cell-centered fields |
| 76 | + for(int dir = 0; dir < 3 ; dir++) { |
| 77 | + size[2-dir] = gb.sizeGlob[dir]; |
| 78 | + start[2-dir] = gb.start[dir]; |
| 79 | + subsize[2-dir] = gb.size[dir]; |
| 80 | + } |
| 81 | + if(read) { |
| 82 | + MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
| 83 | + MPI_ORDER_C, realMPI, &this->descCR)); |
| 84 | + MPI_SAFE_CALL(MPI_Type_commit(&this->descCR)); |
| 85 | + } else { |
| 86 | + MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
| 87 | + MPI_ORDER_C, realMPI, &this->descCW)); |
| 88 | + MPI_SAFE_CALL(MPI_Type_commit(&this->descCW)); |
| 89 | + } |
| 90 | + |
| 91 | + // Dimensions for face-centered field |
| 92 | + for(int face = 0; face < 3 ; face++) { |
| 93 | + for(int dir = 0; dir < 3 ; dir++) { |
| 94 | + size[2-dir] = gb.sizeGlob[dir]; |
| 95 | + start[2-dir] = gb.start[dir]; |
| 96 | + subsize[2-dir] = gb.size[dir]; |
| 97 | + } |
| 98 | + if(read) { |
| 99 | + // Add the extra cell in the face direction |
| 100 | + size[2-face]++; |
| 101 | + subsize[2-face]++; // valid only for reading, |
| 102 | + // since it involves an overlap of data between procs |
| 103 | + |
| 104 | + MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
| 105 | + MPI_ORDER_C, realMPI, &this->descSR[face])); |
| 106 | + MPI_SAFE_CALL(MPI_Type_commit(&this->descSR[face])); |
| 107 | + } else { |
| 108 | + // For writing, only the last proc along the face direction keeps the extra cell |
| 109 | + size[2-face]++; |
| 110 | + if(grid->xproc[face] == grid->nproc[face] - 1 ) subsize[2-face]++; |
| 111 | + MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
| 112 | + MPI_ORDER_C, realMPI, &this->descSW[face])); |
| 113 | + MPI_SAFE_CALL(MPI_Type_commit(&this->descSW[face])); |
| 114 | + } |
| 115 | + } |
| 116 | + // Dimensions for edge-centered field |
| 117 | + for(int nv = 0; nv < 3 ; nv++) { |
| 118 | + // load the array size |
| 119 | + for(int dir = 0; dir < 3 ; dir++) { |
| 120 | + size[2-dir] = gb.sizeGlob[dir]; |
| 121 | + start[2-dir] = gb.start[dir]; |
| 122 | + subsize[2-dir] = gb.size[dir]; |
| 123 | + } |
| 124 | + |
| 125 | + if(read) { |
| 126 | + // Extra cell in the dirs perp to field |
| 127 | + for(int i = 0 ; i < DIMENSIONS ; i++) { |
| 128 | + if(i!=nv) { |
| 129 | + size[2-i]++; |
| 130 | + subsize[2-i]++; // valid only for reading, |
| 131 | + // since it involves an overlap of data between procs |
| 132 | + } |
| 133 | + } |
| 134 | + MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
| 135 | + MPI_ORDER_C, realMPI, &this->descER[nv])); |
| 136 | + MPI_SAFE_CALL(MPI_Type_commit(&this->descER[nv])); |
| 137 | + } else { |
| 138 | + // For writing, only the last proc along each perpendicular direction |
| 139 | + // keeps the additional cell, so there is no overlap between procs |
| 140 | + for(int i = 0 ; i < DIMENSIONS ; i++) { |
| 141 | + if(i!=nv) { |
| 142 | + size[2-i]++; |
| 143 | + if(grid->xproc[i] == grid->nproc[i] - 1 ) { |
| 144 | + subsize[2-i]++; |
| 145 | + } |
| 146 | + } |
| 147 | + } |
| 148 | + MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
| 149 | + MPI_ORDER_C, realMPI, &this->descEW[nv])); |
| 150 | + MPI_SAFE_CALL(MPI_Type_commit(&this->descEW[nv])); |
| 151 | + } |
| 152 | + } |
| 153 | + #endif |
| 154 | +} |
| 155 | + |
65 | 156 | void Dump::Init(DataBlock *datain) { |
66 | 157 | idfx::pushRegion("Dump::Init"); |
67 | 158 | this->data = datain; |
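The helper above relies on a `GridBox` aggregate and on per-mode descriptor members (`descCR`/`descCW`, `descSR`/`descSW`, `descER`/`descEW`) declared in `dump.hpp`, which is outside this hunk. A minimal sketch of what those declarations presumably look like, assuming plain `int` arrays and `MPI_Datatype` members:

```cpp
// Hypothetical sketch of the dump.hpp declarations implied by this hunk;
// the actual header change is not shown in the diff.
struct GridBox {
  int start[3];     // global offset of the local subdomain (ghosts excluded)
  int size[3];      // local number of cells in each direction
  int sizeGlob[3];  // global number of cells in each direction
};

class Dump {
  // ...
  void CreateMPIDataType(GridBox gb, bool read);
#ifdef WITH_MPI
  MPI_Datatype descCR, descCW;        // cell-centered fields: read / write
  MPI_Datatype descSR[3], descSW[3];  // face-centered fields, one per direction
  MPI_Datatype descER[3], descEW[3];  // edge-centered fields, one per direction
#endif
  // ...
};
```

Separate read and write descriptors are needed because, on staggered fields, reads tolerate a one-cell overlap between neighbouring procs, whereas writes must map each file region to exactly one proc: only the last proc along a direction writes the extra face or edge.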
@@ -102,79 +193,15 @@ void Dump::Init(DataBlock *datain) { |
102 | 193 |
103 | 194 | #ifdef WITH_MPI |
104 | 195 | Grid *grid = data->mygrid; |
105 | | - |
106 | | - int start[3]; |
107 | | - int size[3]; |
108 | | - int subsize[3]; |
109 | | - |
110 | | - // Dimensions for cell-centered fields |
| 196 | + GridBox gb; |
111 | 197 | for(int dir = 0; dir < 3 ; dir++) { |
112 | | - size[2-dir] = grid->np_int[dir]; |
113 | | - start[2-dir] = data->gbeg[dir]-data->nghost[dir]; |
114 | | - subsize[2-dir] = data->np_int[dir]; |
| 198 | + gb.start[dir] = data->gbeg[dir]-data->nghost[dir]; |
| 199 | + gb.size[dir] = data->np_int[dir]; |
| 200 | + gb.sizeGlob[dir] = grid->np_int[dir]; |
115 | 201 | } |
116 | | - |
117 | | - MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
118 | | - MPI_ORDER_C, realMPI, &this->descC)); |
119 | | - MPI_SAFE_CALL(MPI_Type_commit(&this->descC)); |
120 | | - |
121 | | - // Dimensions for face-centered field |
122 | | - for(int face = 0; face < 3 ; face++) { |
123 | | - for(int dir = 0; dir < 3 ; dir++) { |
124 | | - size[2-dir] = grid->np_int[dir]; |
125 | | - start[2-dir] = data->gbeg[dir]-data->nghost[dir]; |
126 | | - subsize[2-dir] = data->np_int[dir]; |
127 | | - } |
128 | | - // Add the extra guy in the face direction |
129 | | - size[2-face]++; |
130 | | - subsize[2-face]++; // valid only for reading |
131 | | - //since it involves an overlap of data between procs |
132 | | - |
133 | | - MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
134 | | - MPI_ORDER_C, realMPI, &this->descSR[face])); |
135 | | - MPI_SAFE_CALL(MPI_Type_commit(&this->descSR[face])); |
136 | | - |
137 | | - // Now for writing, it is only the last proc which keeps one additional cell |
138 | | - if(grid->xproc[face] != grid->nproc[face] - 1 ) subsize[2-face]--; |
139 | | - MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
140 | | - MPI_ORDER_C, realMPI, &this->descSW[face])); |
141 | | - MPI_SAFE_CALL(MPI_Type_commit(&this->descSW[face])); |
142 | | - } |
143 | | - // Dimensions for edge-centered field |
144 | | - for(int nv = 0; nv < 3 ; nv++) { |
145 | | - // load the array size |
146 | | - for(int dir = 0; dir < 3 ; dir++) { |
147 | | - size[2-dir] = grid->np_int[dir]; |
148 | | - start[2-dir] = data->gbeg[dir]-data->nghost[dir]; |
149 | | - subsize[2-dir] = data->np_int[dir]; |
150 | | - } |
151 | | - |
152 | | - // Extra cell in the dirs perp to field |
153 | | - for(int i = 0 ; i < DIMENSIONS ; i++) { |
154 | | - if(i!=nv) { |
155 | | - size[2-i]++; |
156 | | - subsize[2-i]++; // valid only for reading |
157 | | - //since it involves an overlap of data between procs |
158 | | - } |
159 | | - } |
160 | | - MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
161 | | - MPI_ORDER_C, realMPI, &this->descER[nv])); |
162 | | - MPI_SAFE_CALL(MPI_Type_commit(&this->descER[nv])); |
163 | | - |
164 | | - // Now for writing, it is only the last proc which keeps one additional cell, |
165 | | - // so we remove what we added for reads |
166 | | - for(int i = 0 ; i < DIMENSIONS ; i++) { |
167 | | - if(i!=nv) { |
168 | | - if(grid->xproc[i] != grid->nproc[i] - 1 ) { |
169 | | - subsize[2-i]--; |
170 | | - } |
171 | | - } |
172 | | - } |
173 | | - MPI_SAFE_CALL(MPI_Type_create_subarray(3, size, subsize, start, |
174 | | - MPI_ORDER_C, realMPI, &this->descEW[nv])); |
175 | | - MPI_SAFE_CALL(MPI_Type_commit(&this->descEW[nv])); |
176 | | - } |
177 | | - |
| 202 | + // Create MPI datatypes for writing (read=false) and reading (read=true) |
| 203 | + CreateMPIDataType(gb, false); |
| 204 | + CreateMPIDataType(gb, true); |
178 | 205 | #endif |
179 | 206 |
180 | 207 | // Register variables that are needed in restart dumps |
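A note on the `size[2-dir]` reversal used throughout `CreateMPIDataType`: with `MPI_ORDER_C`, `MPI_Type_create_subarray` treats index 0 as the slowest-varying dimension, while direction 0 is the innermost (fastest-varying) index of the field arrays, hence the mirrored indexing. A self-contained sketch of the same pattern with hypothetical extents (`realMPI` presumably resolves to `MPI_DOUBLE` or `MPI_FLOAT` depending on the precision Idefix was built with):

```cpp
// Minimal, self-contained illustration of the subarray pattern used above.
// The grid extents are hypothetical; MPI_DOUBLE stands in for realMPI.
#include <mpi.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  // A hypothetical 8x8x8 global grid split along direction 0 across 2 procs.
  int gbStart[3]    = {0, 0, 0};   // this proc's offset in the global array
  int gbSize[3]     = {4, 8, 8};   // local extent per direction (dir 0 first)
  int gbSizeGlob[3] = {8, 8, 8};   // global extent per direction

  int start[3], size[3], subsize[3];
  for (int dir = 0; dir < 3; dir++) {
    // Reverse the index order: MPI_ORDER_C treats index 0 as slowest-varying,
    // while direction 0 is the innermost (contiguous) one here.
    size[2 - dir]    = gbSizeGlob[dir];
    start[2 - dir]   = gbStart[dir];
    subsize[2 - dir] = gbSize[dir];
  }

  MPI_Datatype desc;
  MPI_Type_create_subarray(3, size, subsize, start,
                           MPI_ORDER_C, MPI_DOUBLE, &desc);
  MPI_Type_commit(&desc);
  // The committed type describes this rank's slab of the global array
  // and can be used as a file view for collective dump I/O.
  MPI_Type_free(&desc);

  MPI_Finalize();
  return 0;
}
```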
@@ -661,6 +688,7 @@ bool Dump::Read(Output& output, int readNumber ) { |
661 | 688 | } |
662 | 689 | // Todo: check that coordinates are identical |
663 | 690 | } |
| 691 | + |
664 | 692 | std::unordered_set<std::string> notFound {}; |
665 | 693 | for(auto it = dumpFieldMap.begin(); it != dumpFieldMap.end(); it++) { |
666 | 694 | notFound.insert(it->first); |
@@ -701,7 +729,7 @@ bool Dump::Read(Output& output, int readNumber ) { |
701 | 729 | } |
702 | 730 | } |
703 | 731 | if(scalar.GetLocation() == DumpField::ArrayLocation::Center) { |
704 | | - ReadDistributed(fileHdl, ndim, nx, nxglob, descC, scrch); |
| 732 | + ReadDistributed(fileHdl, ndim, nx, nxglob, descCR, scrch); |
705 | 733 | } else if(scalar.GetLocation() == DumpField::ArrayLocation::Face) { |
706 | 734 | ReadDistributed(fileHdl, ndim, nx, nxglob, descSR[direction], scrch); |
707 | 735 | } else if(scalar.GetLocation() == DumpField::ArrayLocation::Edge) { |
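`ReadDistributed` itself is not shown in this diff; for context, a hypothetical sketch of how a committed subarray descriptor such as `descCR` is typically consumed with MPI-IO (the function name, offset handling, and buffer type here are assumptions, not the actual implementation):

```cpp
// Hypothetical sketch of consuming a subarray descriptor with MPI-IO;
// the real ReadDistributed() signature may differ.
#include <mpi.h>

void ReadWithDescriptor(MPI_File fileHdl, MPI_Offset offset,
                        MPI_Datatype desc, double *scrch, int localCount) {
  // Each rank sees only its own subarray of the on-disk global array.
  MPI_File_set_view(fileHdl, offset, MPI_DOUBLE, desc, "native", MPI_INFO_NULL);
  // Collective read: ranks fill their local buffers from their file regions
  // (overlapping by one layer for the face/edge "read" descriptors).
  MPI_File_read_all(fileHdl, scrch, localCount, MPI_DOUBLE, MPI_STATUS_IGNORE);
}
```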
@@ -887,7 +915,7 @@ int Dump::Write(Output& output) { |
887 | 915 | } |
888 | 916 |
889 | 917 | if(scalar.GetLocation() == DumpField::ArrayLocation::Center) { |
890 | | - WriteDistributed(fileHdl, 3, nx, nxtot, fieldName, this->descC, scrch); |
| 918 | + WriteDistributed(fileHdl, 3, nx, nxtot, fieldName, this->descCW, scrch); |
891 | 919 | } else if(scalar.GetLocation() == DumpField::ArrayLocation::Face) { |
892 | 920 | WriteDistributed(fileHdl, 3, nx, nxtot, fieldName, this->descSW[dir], scrch); |
893 | 921 | } else if(scalar.GetLocation() == DumpField::ArrayLocation::Edge) { |
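One side effect of the refactor is that the cell-centered descriptor now exists in two committed variants (`descCR` and `descCW`) instead of one. The diff does not show where the datatypes are released; a minimal sketch of a matching cleanup, assuming no `MPI_Type_free` calls exist elsewhere and that it runs before `MPI_Finalize`:

```cpp
// Hypothetical cleanup; not part of this diff. Assumes the types were
// committed in Init() via the two CreateMPIDataType() calls above.
Dump::~Dump() {
#ifdef WITH_MPI
  MPI_Type_free(&descCR);
  MPI_Type_free(&descCW);
  for (int dir = 0; dir < 3; dir++) {
    MPI_Type_free(&descSR[dir]);
    MPI_Type_free(&descSW[dir]);
    MPI_Type_free(&descER[dir]);
    MPI_Type_free(&descEW[dir]);
  }
#endif
}
```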