Merge pull request #1990 from NCAR/fix/largedof
Fix/largedof
jedwards4b authored Jan 22, 2025
2 parents 6539ef0 + 7f659a3 commit 09dcc06
Showing 2 changed files with 27 additions and 22 deletions.
32 changes: 24 additions & 8 deletions src/clib/pio_darray_int.c
@@ -430,14 +430,19 @@ write_darray_multi_par(file_desc_t *file, int nvars, int fndims, const int *vari

ierr = ncmpi_wait_all(file->fh, NC_REQ_ALL, NULL, NULL);
#endif /* _PNETCDF */

+MPI_Offset chkcnt2=0;
/* Process each region of data to be written. */
for (int regioncnt = 0; regioncnt < num_regions; regioncnt++)
{
/* Fill the start/count arrays. */
if ((ierr = find_start_count(iodesc->ndims, fndims, vdesc, region, frame,
start, count)))
return pio_err(ios, file, ierr, __FILE__, __LINE__);
+size_t cnt = 1;
+for(int i=0; i<fndims; i++){
+cnt *= count[i];
+}
+chkcnt2 += cnt;

/* IO tasks will run the netCDF/pnetcdf functions to write the data. */
switch (file->iotype)
@@ -490,10 +495,10 @@ write_darray_multi_par(file_desc_t *file, int nvars, int fndims, const int *vari
for (int i = 0; i < fndims; i++)
{
startlist[rrcnt][i] = start[i];
-countlist[rrcnt][i] = count[i];
-PLOG((3, "startlist[%d][%d] = %d countlist[%d][%d] = %d", rrcnt, i,
+countlist[rrcnt][i] = count[i];
+PLOG((3, "startlist[%d][%d] = %lld countlist[%d][%d] = %lld", rrcnt, i,
startlist[rrcnt][i], rrcnt, i, countlist[rrcnt][i]));
}
}
rrcnt++;
}

@@ -645,11 +650,22 @@ write_darray_multi_par(file_desc_t *file, int nvars, int fndims, const int *vari
else /* don't flush yet, accumulate the request size */
vard_llen += llen;
#else

ierr = ncmpi_iput_varn(file->fh, varids[nv], rrcnt, startlist, countlist,
-bufptr, llen, iodesc->mpitype, NULL);


+bufptr, chkcnt2, iodesc->mpitype, NULL);
+// bufptr, llen, iodesc->mpitype, NULL);


+if (ierr){
+MPI_Offset chksize=0;
+for (int j = 0; j < rrcnt; j++)
+{
+/* printf("%d: nv=%d startlist[%d][%d] = %lld countlist[%d][%d] = %lld\n", ios->io_rank, nv, j, i,
+startlist[j][i], j, i, countlist[j][i]);
+*/
+chksize += countlist[j][0]*countlist[j][1]*countlist[j][2];
+}
+printf("llen = %lld chksize = %lld chkcnt2 = %lld\n",llen, chksize, chkcnt2);
+}
vdesc->nreqs++;
#endif
}
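
The hunks above add a running element count, chkcnt2, formed as the product of count[] over all dimensions of each region and summed across regions, and pass it as the bufcount argument of ncmpi_iput_varn in place of llen. Below is a minimal standalone sketch of that counting step; the use of long long in place of MPI_Offset and the example region shapes are assumptions for illustration, not code from PIO.

/* Sketch of the element-count bookkeeping added above: for each region,
 * multiply the per-dimension counts together, then sum over regions.
 * Assumptions for illustration only: long long stands in for MPI_Offset
 * and the region shapes below are made up. */
#include <stdio.h>

int main(void)
{
    /* Two regions of a 3-D variable, each 1 x 4096 x 1000000 elements. */
    long long count[2][3] = { {1, 4096, 1000000}, {1, 4096, 1000000} };
    int num_regions = 2, fndims = 3;

    long long chkcnt2 = 0;              /* running total across regions */
    for (int r = 0; r < num_regions; r++)
    {
        long long cnt = 1;              /* elements in this region */
        for (int i = 0; i < fndims; i++)
            cnt *= count[r][i];
        chkcnt2 += cnt;
    }

    /* Each region already holds 4,096,000,000 elements, more than fits in a
     * 32-bit int, so the total has to be kept in a 64-bit type and printed
     * with %lld, mirroring the MPI_Offset/%lld changes in the diff. */
    printf("total elements = %lld\n", chkcnt2);
    return 0;
}
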
17 changes: 3 additions & 14 deletions src/clib/pio_rearrange.c
@@ -80,9 +80,6 @@ expand_region(int dim, const int *gdimlen, int maplen, const PIO_Offset *map,
int region_size, int region_stride, const int *max_size,
PIO_Offset *count)
{
-/* Flag used to signal that we can no longer expand the region
-along dimension dim. */
-int expansion_done = 0;
/* Check inputs. */
pioassert(dim >= 0 && gdimlen && maplen >= 0 && map && region_size >= 0 &&
maplen >= region_size && region_stride >= 0 && max_size && count,
@@ -100,26 +97,18 @@
Assuming monotonicity in the map, we could skip this for the
innermost dimension, but it's necessary past that because the
region does not necessarily comprise contiguous values. */
-for (int j = 0; j < region_size; j++)
+int test_idx = i * region_size;
+for (int j = 0; j < region_size; j++, test_idx++)
{
-int test_idx; /* Index we are testing. */

-test_idx = j + i * region_size;

/* If we have exhausted the map, or the map no longer matches,
we are done, break out of both loops. */
//PLOG((3,"dim=%d maplen = %d map[%d]=%d map[%d]=%d i=%d region_stride=%d",dim, maplen, test_idx, map[test_idx], j, map[j],i,region_stride));
if (test_idx >= maplen || map[test_idx] != map[j] + i * region_stride)
{
-expansion_done = 1;
-break;
+return;
}
}
-if (expansion_done)
-break;

}
-PLOG((3,"expansion_done = %d count[%d]=%ld",expansion_done, dim, count[dim]));
/* Move on to next outermost dimension if there are more left,
* else return. */
if (dim > 0)
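
The change above drops the expansion_done flag in expand_region and returns as soon as the map stops matching, carrying test_idx along in the loop header instead of recomputing it each pass. Below is a toy sketch of that control-flow pattern; the helper name expand_count, the map data, and the scalar count are assumptions for illustration, not the PIO routine.

/* Sketch of the expand_region control-flow change: instead of setting an
 * expansion_done flag and breaking out of two loops, the rewritten code
 * returns as soon as the map stops matching.  The map below and the helper
 * name expand_count are made up for illustration. */
#include <stdio.h>

/* Grow *count while map[] keeps repeating its first region_size entries
 * at a fixed stride (the same matching rule tested in the diff). */
static void expand_count(const long long *map, int maplen, int region_size,
                         int region_stride, int max_size, long long *count)
{
    for (int i = 1; i < max_size; i++)
    {
        int test_idx = i * region_size;
        for (int j = 0; j < region_size; j++, test_idx++)
        {
            if (test_idx >= maplen || map[test_idx] != map[j] + i * region_stride)
                return;                /* early return replaces flag + break */
        }
        *count = i + 1;
    }
}

int main(void)
{
    /* Two contiguous 4-element rows of a global array with stride 10,
       followed by a row that breaks the pattern. */
    long long map[] = {0, 1, 2, 3, 10, 11, 12, 13, 40, 41, 42, 43};
    long long count = 1;
    expand_count(map, 12, 4, 10, 3, &count);
    printf("count = %lld\n", count);   /* prints 2 */
    return 0;
}
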
