Warning
-------
To use MPI collective writing, you need to call first the class methods :class:`Rectilinear.initMPI` (cf their docstring).
-Also, `Rectilinear.setHeader` **must be given the global grids coordinates**, wether the code is run in parallel or not.
-
-> ⚠️ Also : this module can only be imported with **Python 3.11 or higher** !
+Also, `Rectilinear.setHeader` **must be given the global grids coordinates**, whether the code is run in parallel or not.
"""
import os
import numpy as np
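# Usage sketch (not part of this commit): what the warning above means in practice.
# setupMPI must be called before creating or writing the file, and setHeader always
# receives the *global* grid coordinates, even though each rank only writes its
# local block. The grid sizes, dtype, file name and the manual 1D decomposition
# below are illustrative assumptions. Run under mpirun (a single rank also works).
import numpy as np
from mpi4py import MPI
from pySDC.helpers.fieldsIO import Rectilinear

comm = MPI.COMM_WORLD
nVar, nX, nY = 2, 64, 32

# Global coordinates of the rectilinear grid (identical on every rank)
coords = [np.linspace(0, 1, num=n, endpoint=False) for n in (nX, nY)]

# Contiguous split of the first axis across ranks (hypothetical decomposition)
counts = [nX // comm.size + (r < nX % comm.size) for r in range(comm.size)]
iLocX, nLocX = sum(counts[: comm.rank]), counts[comm.rank]

# Collective-IO setup must happen before the file is created or written
Rectilinear.setupMPI(comm, iLoc=[iLocX, 0], nLoc=[nLocX, nY])

f = Rectilinear(np.float64, "fields.pysdc")
f.setHeader(nVar=nVar, coords=coords)  # global coordinates, not the local ones
f.initialize()

uLoc = np.zeros((nVar, nLocX, nY))     # this rank's local block of the field
f.addField(0.0, uLoc)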
@@ -202,7 +200,7 @@ def initialize(self):
        if not self.ALLOW_OVERWRITE:
            assert not os.path.isfile(
                self.fileName
-            ), "file already exists, use FieldsIO.ALLOW_OVERWRITE = True to allow overwriting"
+            ), f"file {self.fileName!r} already exists, use FieldsIO.ALLOW_OVERWRITE = True to allow overwriting"

        with open(self.fileName, "w+b") as f:
            self.hBase.tofile(f)
@@ -475,7 +473,7 @@ def toVTR(self, baseName, varNames, idxFormat="{:06d}"):

        Example
        -------
-        >>> # Suppose the FieldsIO object is already writen into outputs.pysdc
+        >>> # Suppose the FieldsIO object is already written into outputs.pysdc
        >>> import os
        >>> from pySDC.utils.fieldsIO import Rectilinear
        >>> os.makedirs("vtrFiles")  # to store all VTR files into a subfolder
@@ -494,12 +492,13 @@ def toVTR(self, baseName, varNames, idxFormat="{:06d}"):
    # MPI-parallel implementation
    # -------------------------------------------------------------------------
    comm: MPI.Intracomm = None
+    _nCollectiveIO = None

    @classmethod
    def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
        """
        Setup the MPI mode for the files IO, considering a decomposition
-        of the 1D grid into contiuous subintervals.
+        of the 1D grid into contiguous subintervals.

        Parameters
        ----------
@@ -514,6 +513,20 @@ def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
        cls.iLoc = iLoc
        cls.nLoc = nLoc
        cls.mpiFile = None
+        cls._nCollectiveIO = None
+
+    @property
+    def nCollectiveIO(self):
+        """
+        Number of collective IO operations over all processes, when reading or writing a field.
+
+        Returns:
+        --------
+        int: Number of collective IO accesses
+        """
+        if self._nCollectiveIO is None:
+            self._nCollectiveIO = self.comm.allreduce(self.nVar * np.prod(self.nLoc[:-1]), op=MPI.MAX)
+        return self._nCollectiveIO

    @property
    def MPI_ON(self):
@@ -541,7 +554,7 @@ def MPI_WRITE(self, data):
        """Write data (np.ndarray) in the binary file in MPI mode, at the current file cursor position."""
        self.mpiFile.Write(data)

-    def MPI_WRITE_AT(self, offset, data: np.ndarray):
+    def MPI_WRITE_AT_ALL(self, offset, data: np.ndarray):
        """
        Write data in the binary file in MPI mode, with a given offset
        **relative to the beginning of the file**.
@@ -553,9 +566,9 @@ def MPI_WRITE_AT(self, offset, data: np.ndarray):
        data : np.ndarray
            Data to be written in the binary file.
        """
-        self.mpiFile.Write_at(offset, data)
+        self.mpiFile.Write_at_all(offset, data)

-    def MPI_READ_AT(self, offset, data):
+    def MPI_READ_AT_ALL(self, offset, data: np.ndarray):
        """
        Read data from the binary file in MPI mode, with a given offset
        **relative to the beginning of the file**.
@@ -567,7 +580,7 @@ def MPI_READ_AT(self, offset, data):
        data : np.ndarray
            Array on which to read the data from the binary file.
        """
-        self.mpiFile.Read_at(offset, data)
+        self.mpiFile.Read_at_all(offset, data)

    def MPI_FILE_CLOSE(self):
        """Close the binary file in MPI mode"""
@@ -620,13 +633,22 @@ def addField(self, time, field):

        offset0 = self.fileSize
        self.MPI_FILE_OPEN(mode="a")
+        nWrites = 0
+        nCollectiveIO = self.nCollectiveIO
+
        if self.MPI_ROOT:
            self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
        offset0 += self.tSize

        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
-            self.MPI_WRITE_AT(offset, field[iVar, *iBeg])
+            self.MPI_WRITE_AT_ALL(offset, field[(iVar, *iBeg)])
+            nWrites += 1
+
+        for _ in range(nCollectiveIO - nWrites):
+            # Additional collective write to catch up with other processes
+            self.MPI_WRITE_AT_ALL(offset0, field[:0])
+
        self.MPI_FILE_CLOSE()

    def iPos(self, iVar, iX):
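# Standalone sketch (not part of this commit) of the call-count matching used above.
# Write_at_all is a collective MPI-IO operation: every rank in the communicator must
# take part in the same number of calls. nCollectiveIO is the maximum local count
# over all ranks (allreduce with MPI.MAX), so ranks owning fewer rows pad with
# zero-size writes, just like the field[:0] writes above. The file name and the
# uneven decomposition are hypothetical. Run under mpirun with a few ranks.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

nRowsLoc = 3 if rank == 0 else 2                  # uneven row ownership across ranks
data = np.full((nRowsLoc, 4), rank, dtype=np.float64)

iRow0 = comm.scan(nRowsLoc) - nRowsLoc            # exclusive prefix sum -> first global row
rowBytes = data[0].nbytes
nCollective = comm.allreduce(nRowsLoc, op=MPI.MAX)

fh = MPI.File.Open(comm, "demo.bin", MPI.MODE_WRONLY | MPI.MODE_CREATE)
for i in range(nCollective):
    if i < nRowsLoc:
        fh.Write_at_all((iRow0 + i) * rowBytes, data[i])
    else:
        fh.Write_at_all(0, data[:0])              # empty write: joins the collective, moves no data
fh.Close()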
@@ -669,9 +691,18 @@ def readField(self, idx):
        field = np.empty((self.nVar, *self.nLoc), dtype=self.dtype)

        self.MPI_FILE_OPEN(mode="r")
+        nReads = 0
+        nCollectiveIO = self.nCollectiveIO
+
        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
-            self.MPI_READ_AT(offset, field[iVar, *iBeg])
+            self.MPI_READ_AT_ALL(offset, field[(iVar, *iBeg)])
+            nReads += 1
+
+        for _ in range(nCollectiveIO - nReads):
+            # Additional collective read to catch up with other processes
+            self.MPI_READ_AT_ALL(offset0, field[:0])
+
        self.MPI_FILE_CLOSE()

        return t, field
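# Read-back sketch (not part of this commit): fromFile reconstructs the FieldsIO
# object from the file header, and readField returns the stored time and field
# for a given index. The file name matches the hypothetical write sketch near the
# top of this diff; in MPI mode each rank receives only its local block.
from pySDC.helpers.fieldsIO import FieldsIO

f = FieldsIO.fromFile("fields.pysdc")   # serial, or after Rectilinear.setupMPI
t0, u0 = f.readField(0)                 # first stored time and (local) field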
@@ -684,7 +715,7 @@ def initGrid(nVar, gridSizes):
    dim = len(gridSizes)
    coords = [np.linspace(0, 1, num=n, endpoint=False) for n in gridSizes]
    s = [None] * dim
-    u0 = np.array(np.arange(nVar) + 1)[:, *s]
+    u0 = np.array(np.arange(nVar) + 1)[(slice(None), *s)]
    for x in np.meshgrid(*coords, indexing="ij"):
        u0 = u0 * x
    return coords, u0
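# Note on the indexing change above (illustration, not part of this commit):
# star-unpacking inside a subscript, as in u0[:, *s], is only valid syntax on
# Python 3.11+, which is presumably why the old module docstring required 3.11.
# Building the index tuple explicitly is equivalent and runs on older interpreters.
import numpy as np

s = [None, None]                    # two broadcast axes, as in initGrid for a 2D grid
u0 = np.arange(3) + 1               # e.g. nVar = 3

v = u0[(slice(None), *s)]           # works on any supported Python version
# v = u0[:, *s]                     # SyntaxError before Python 3.11
print(v.shape)                      # -> (3, 1, 1)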
@@ -706,8 +737,7 @@ def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
    iLoc, nLoc = blocks.localBounds
    Rectilinear.setupMPI(comm, iLoc, nLoc)
    s = [slice(i, i + n) for i, n in zip(iLoc, nLoc)]
-    u0 = u0[:, *s]
-    print(MPI_RANK, u0.shape)
+    u0 = u0[(slice(None), *s)]

    f1 = Rectilinear(DTYPES[dtypeIdx], fileName)
    f1.setHeader(nVar=nVar, coords=coords)
@@ -726,6 +756,11 @@ def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
def compareFields_MPI(fileName, u0, nSteps):
    from pySDC.helpers.fieldsIO import FieldsIO

+    comm = MPI.COMM_WORLD
+    MPI_RANK = comm.Get_rank()
+    if MPI_RANK == 0:
+        print("Comparing fields with MPI")
+
    f2 = FieldsIO.fromFile(fileName)

    times = np.arange(nSteps) / nSteps