Actual source code: mpi.c
1: /*
2: This provides a few of the MPI-uni functions that cannot be implemented
3: with C macros
4: */
5: #include <petscsys.h>
6: #if !defined(MPIUNI_H)
7: #error "Wrong mpi.h included! require mpi.h from MPIUNI"
8: #endif
10: #include <petsc/private/petscimpl.h>
12: #if defined(PETSC_HAVE_CUDA)
13: #include <cuda_runtime.h>
14: #endif
16: #if defined(PETSC_HAVE_HIP)
17: #include <hip/hip_runtime.h>
18: #endif
20: #define MPI_SUCCESS 0
21: #define MPI_FAILURE 1
23: void *MPIUNI_TMP = NULL;
25: /*
26: With MPI Uni there are exactly four distinct communicators:
27: MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)
29: MPI_COMM_SELF and MPI_COMM_WORLD are freed via MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
30: the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
32: */
33: #define MAX_ATTR 256
34: #define MAX_COMM 128
36: typedef struct {
37: void *attribute_val;
38: int active;
39: } MPI_Attr;
41: typedef struct {
42: void *extra_state;
43: MPI_Delete_function *del;
44: int active; /* Is this keyval in use by some comm? */
45: } MPI_Attr_keyval;
47: static MPI_Attr_keyval attr_keyval[MAX_ATTR];
48: static MPI_Attr attr[MAX_COMM][MAX_ATTR];
49: static int comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
50: static int mpi_tag_ub = 100000000;
51: static int num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
52: static int MaxComm = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
53: static void* MPIUNIF_mpi_in_place = 0;
55: #define CommIdx(comm) ((comm)-1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
57: #if defined(__cplusplus)
58: extern "C" {
59: #endif
61: /*
62: To avoid problems with the prototype of the system memcpy(), it is duplicated here
63: */
64: int MPIUNI_Memcpy(void *dst,const void *src,int n)
65: {
66: if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
67: if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
68: if (!n) return MPI_SUCCESS;
70: /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
71: #if defined(PETSC_HAVE_CUDA)
72: if (PetscCUDAInitialized) {cudaError_t cerr = cudaMemcpy(dst,src,n,cudaMemcpyDefault);if (cerr != cudaSuccess) return MPI_FAILURE;} else
73: #elif defined(PETSC_HAVE_HIP)
74: if (PetscHIPInitialized) {hipError_t cerr = hipMemcpy(dst,src,n,hipMemcpyDefault); if (cerr != hipSuccess) return MPI_FAILURE;} else
75: #endif
76: {memcpy(dst,src,n);}
77: return MPI_SUCCESS;
78: }
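/*
   Note: most MPIUNI "collectives" are macros in MPIUNI's mpi.h that, on the single rank, reduce to
   copying the send buffer into the receive buffer, which is what MPIUNI_Memcpy() above provides
   (including the GPU-aware path). Roughly, and only as an illustration of that expansion,

     double in = 3.0, out = 0.0;
     MPI_Allreduce(&in,&out,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);

   amounts to MPIUNI_Memcpy(&out,&in,1*MPI_sizeof(MPI_DOUBLE)).
*/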
80: static int classcnt = 0;
81: static int codecnt = 0;
83: int MPI_Add_error_class(int *cl)
84: {
85: *cl = classcnt++;
86: return MPI_SUCCESS;
87: }
89: int MPI_Add_error_code(int cl,int *co)
90: {
91: if (cl >= classcnt) return MPI_FAILURE;
92: *co = codecnt++;
93: return MPI_SUCCESS;
94: }
96: int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
97: {
98: int comb = datatype >> 28;
99: switch (comb) {
100: case MPI_COMBINER_NAMED:
101: *num_integers = 0;
102: *num_addresses = 0;
103: *num_datatypes = 0;
104: *combiner = comb;
105: break;
106: case MPI_COMBINER_DUP:
107: *num_integers = 0;
108: *num_addresses = 0;
109: *num_datatypes = 1;
110: *combiner = comb;
111: break;
112: case MPI_COMBINER_CONTIGUOUS:
113: *num_integers = 1;
114: *num_addresses = 0;
115: *num_datatypes = 1;
116: *combiner = comb;
117: break;
118: default:
119: return MPIUni_Abort(MPI_COMM_SELF,1);
120: }
121: return MPI_SUCCESS;
122: }
124: int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
125: {
126: int comb = datatype >> 28;
127: switch (comb) {
128: case MPI_COMBINER_NAMED:
129: return MPIUni_Abort(MPI_COMM_SELF,1);
130: case MPI_COMBINER_DUP:
131: if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
132: array_of_datatypes[0] = datatype & 0x0fffffff;
133: break;
134: case MPI_COMBINER_CONTIGUOUS:
135: if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
136: array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
137: array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
138: break;
139: default:
140: return MPIUni_Abort(MPI_COMM_SELF,1);
141: }
142: return MPI_SUCCESS;
143: }
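/*
   The two routines above decode MPIUNI's packed datatype handles: the top four bits (datatype >> 28)
   hold the combiner, and for MPI_COMBINER_CONTIGUOUS bits 8-19 hold the count while the remaining
   bits identify the underlying named type. The exact encoding lives in MPIUNI's mpi.h; as an
   illustration only, assuming MPI_Type_contiguous() there builds handles in this format, a type t
   created with MPI_Type_contiguous(3,MPI_DOUBLE,&t) would report combiner = MPI_COMBINER_CONTIGUOUS,
   num_integers = 1 and num_datatypes = 1 from MPI_Type_get_envelope(), and array_of_integers[0] = 3
   with array_of_datatypes[0] = MPI_DOUBLE from MPI_Type_get_contents().
*/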
145: /*
146: Used to set the built-in MPI_TAG_UB attribute
147: */
148: static int Keyval_setup(void)
149: {
150: attr[CommIdx(MPI_COMM_WORLD)][0].active = 1;
151: attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
152: attr[CommIdx(MPI_COMM_SELF)][0].active = 1;
153: attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
154: attr_keyval[0].active = 1;
155: return MPI_SUCCESS;
156: }
158: int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
159: {
160: int i,keyid;
161: for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
162: if (!attr_keyval[i].active) {
163: keyid = i;
164: goto found;
165: }
166: }
167: if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
168: keyid = num_attr++;
170: found:
171: attr_keyval[keyid].extra_state = extra_state;
172: attr_keyval[keyid].del = delete_fn;
173: attr_keyval[keyid].active = 1;
174: *keyval = keyid;
175: return MPI_SUCCESS;
176: }
178: int MPI_Comm_free_keyval(int *keyval)
179: {
180: attr_keyval[*keyval].extra_state = 0;
181: attr_keyval[*keyval].del = 0;
182: attr_keyval[*keyval].active = 0;
183: *keyval = 0;
184: return MPI_SUCCESS;
185: }
187: int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
188: {
189: int idx = CommIdx(comm);
190: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
191: attr[idx][keyval].active = 1;
192: attr[idx][keyval].attribute_val = attribute_val;
193: return MPI_SUCCESS;
194: }
196: int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
197: {
198: int idx = CommIdx(comm);
199: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
200: if (attr[idx][keyval].active && attr_keyval[keyval].del) {
201: void *save_attribute_val = attr[idx][keyval].attribute_val;
202: attr[idx][keyval].active = 0;
203: attr[idx][keyval].attribute_val = 0;
204: (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
205: }
206: return MPI_SUCCESS;
207: }
209: int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
210: {
211: int idx = CommIdx(comm);
212: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
213: if (!keyval) Keyval_setup();
214: *flag = attr[idx][keyval].active;
215: *(void**)attribute_val = attr[idx][keyval].attribute_val;
216: return MPI_SUCCESS;
217: }
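/*
   The keyval/attribute routines above implement standard MPI attribute caching, which PETSc uses for
   its per-communicator bookkeeping; keyval 0 is reserved for the predefined MPI_TAG_UB attribute set
   up in Keyval_setup(). An illustrative sequence (mydata is a placeholder and the copy/delete
   callbacks are passed as NULL for brevity):

     int keyval, flag, *val;
     MPI_Comm_create_keyval(NULL,NULL,&keyval,NULL);
     MPI_Comm_set_attr(MPI_COMM_WORLD,keyval,&mydata);
     MPI_Comm_get_attr(MPI_COMM_WORLD,keyval,&val,&flag);

   after which flag is 1 and val points at mydata; MPI_Comm_delete_attr() and MPI_Comm_free_keyval()
   undo the two steps.
*/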
219: int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
220: {
221: int j;
222: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
223: for (j=3; j<=MaxComm; j++) {
224: if (!comm_active[CommIdx(j)]) {
225: comm_active[CommIdx(j)] = 1;
226: *newcomm = j;
227: return MPI_SUCCESS;
228: }
229: }
230: if (MaxComm >= MAX_COMM) return MPI_FAILURE;
231: *newcomm = ++MaxComm;
232: comm_active[CommIdx(*newcomm)] = 1;
233: return MPI_SUCCESS;
234: }
236: int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
237: {
238: int j;
239: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
240: for (j=3; j<=MaxComm; j++) {
241: if (!comm_active[CommIdx(j)]) {
242: comm_active[CommIdx(j)] = 1;
243: *out = j;
244: return MPI_SUCCESS;
245: }
246: }
247: if (MaxComm >= MAX_COMM) return MPI_FAILURE;
248: *out = ++MaxComm;
249: comm_active[CommIdx(*out)] = 1;
250: return MPI_SUCCESS;
251: }
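/*
   Communicators here are just small integer handles: MPI_COMM_SELF is 1, MPI_COMM_WORLD is 2, and
   MPI_Comm_create()/MPI_Comm_dup() hand out handles starting at 3, reusing freed slots before
   growing MaxComm. As a sketch,

     MPI_Comm dup;
     MPI_Comm_dup(MPI_COMM_WORLD,&dup);    the first call after startup yields dup == 3
     MPI_Comm_free(&dup);                  slot 3 is marked inactive and can be handed out again
*/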
253: int MPI_Comm_free(MPI_Comm *comm)
254: {
255: int i;
256: int idx = CommIdx(*comm);
258: if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
259: for (i=0; i<num_attr; i++) {
260: if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
261: attr[idx][i].active = 0;
262: attr[idx][i].attribute_val = 0;
263: }
264: if (*comm >= 3) comm_active[idx] = 0;
265: *comm = 0;
266: return MPI_SUCCESS;
267: }
269: int MPI_Comm_size(MPI_Comm comm, int *size)
270: {
271: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
272: *size=1;
273: return MPI_SUCCESS;
274: }
276: int MPI_Comm_rank(MPI_Comm comm, int *rank)
277: {
278: if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
279: *rank=0;
280: return MPI_SUCCESS;
281: }
283: int MPIUni_Abort(MPI_Comm comm,int errorcode)
284: {
285: printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
286: return MPI_ERR_NOSUPPORT;
287: }
289: int MPI_Abort(MPI_Comm comm,int errorcode)
290: {
291: abort();
292: return MPI_SUCCESS;
293: }
295: /* --------------------------------------------------------------------------*/
297: static int MPI_was_initialized = 0;
298: static int MPI_was_finalized = 0;
300: int MPI_Init(int *argc, char ***argv)
301: {
302: if (MPI_was_initialized) return MPI_FAILURE;
303: /* The MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so a standard-compliant
304: MPIUNI should have the check 'if (MPI_was_finalized) return MPI_FAILURE;'. We relax it here to make life easier for users
305: of MPIUNI so that they can do multiple PetscInitialize()/PetscFinalize().
306: */
307: /* if (MPI_was_finalized) return MPI_FAILURE; */
308: MPI_was_initialized = 1;
309: MPI_was_finalized = 0;
310: return MPI_SUCCESS;
311: }
313: int MPI_Init_thread(int *argc, char ***argv, int required, int* provided)
314: {
315: MPI_Query_thread(provided);
316: return MPI_Init(argc,argv);
317: }
319: int MPI_Query_thread(int* provided)
320: {
321: *provided = MPI_THREAD_FUNNELED;
322: return MPI_SUCCESS;
323: }
325: int MPI_Finalize(void)
326: {
327: MPI_Comm comm;
328: if (MPI_was_finalized) return MPI_FAILURE;
329: if (!MPI_was_initialized) return MPI_FAILURE;
330: comm = MPI_COMM_WORLD;
331: MPI_Comm_free(&comm);
332: comm = MPI_COMM_SELF;
333: MPI_Comm_free(&comm);
334: #if defined(PETSC_USE_DEBUG)
335: {
336: int i;
337: for (i=3; i<=MaxComm; i++) {
338: if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
339: }
340: }
341: #endif
342: /* reset counters */
343: MaxComm = 2;
344: num_attr = 1;
345: MPI_was_finalized = 1;
346: MPI_was_initialized = 0;
347: PETSC_COMM_WORLD = MPI_COMM_NULL;
348: return MPI_SUCCESS;
349: }
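/*
   Since MPI_Finalize() resets MaxComm and num_attr, and MPI_Init() deliberately does not reject a
   previously finalized MPIUNI, the init/finalize pair can be cycled, e.g.

     MPI_Init(NULL,NULL);   ...   MPI_Finalize();
     MPI_Init(NULL,NULL);   ...   MPI_Finalize();

   which the MPI standard forbids but PETSc relies on for repeated PetscInitialize()/PetscFinalize().
*/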
351: int MPI_Initialized(int *flag)
352: {
353: *flag = MPI_was_initialized;
354: return MPI_SUCCESS;
355: }
357: int MPI_Finalized(int *flag)
358: {
359: *flag = MPI_was_finalized;
360: return MPI_SUCCESS;
361: }
363: /* ------------------- Fortran versions of several routines ------------------ */
365: #if defined(PETSC_HAVE_FORTRAN_CAPS)
366: #define mpiunisetmoduleblock_ MPIUNISETMODULEBLOCK
367: #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
368: #define petsc_mpi_init_ PETSC_MPI_INIT
369: #define petsc_mpi_finalize_ PETSC_MPI_FINALIZE
370: #define petsc_mpi_comm_size_ PETSC_MPI_COMM_SIZE
371: #define petsc_mpi_comm_rank_ PETSC_MPI_COMM_RANK
372: #define petsc_mpi_abort_ PETSC_MPI_ABORT
373: #define petsc_mpi_reduce_ PETSC_MPI_REDUCE
374: #define petsc_mpi_allreduce_ PETSC_MPI_ALLREDUCE
375: #define petsc_mpi_barrier_ PETSC_MPI_BARRIER
376: #define petsc_mpi_bcast_ PETSC_MPI_BCAST
377: #define petsc_mpi_gather_ PETSC_MPI_GATHER
378: #define petsc_mpi_allgather_ PETSC_MPI_ALLGATHER
379: #define petsc_mpi_comm_split_ PETSC_MPI_COMM_SPLIT
380: #define petsc_mpi_scan_ PETSC_MPI_SCAN
381: #define petsc_mpi_send_ PETSC_MPI_SEND
382: #define petsc_mpi_recv_ PETSC_MPI_RECV
383: #define petsc_mpi_reduce_scatter_ PETSC_MPI_REDUCE_SCATTER
384: #define petsc_mpi_irecv_ PETSC_MPI_IRECV
385: #define petsc_mpi_isend_ PETSC_MPI_ISEND
386: #define petsc_mpi_sendrecv_ PETSC_MPI_SENDRECV
387: #define petsc_mpi_test_ PETSC_MPI_TEST
388: #define petsc_mpi_waitall_ PETSC_MPI_WAITALL
389: #define petsc_mpi_waitany_ PETSC_MPI_WAITANY
390: #define petsc_mpi_allgatherv_ PETSC_MPI_ALLGATHERV
391: #define petsc_mpi_alltoallv_ PETSC_MPI_ALLTOALLV
392: #define petsc_mpi_comm_create_ PETSC_MPI_COMM_CREATE
393: #define petsc_mpi_address_ PETSC_MPI_ADDRESS
394: #define petsc_mpi_pack_ PETSC_MPI_PACK
395: #define petsc_mpi_unpack_ PETSC_MPI_UNPACK
396: #define petsc_mpi_pack_size_ PETSC_MPI_PACK_SIZE
397: #define petsc_mpi_type_struct_ PETSC_MPI_TYPE_STRUCT
398: #define petsc_mpi_type_commit_ PETSC_MPI_TYPE_COMMIT
399: #define petsc_mpi_wtime_ PETSC_MPI_WTIME
400: #define petsc_mpi_cancel_ PETSC_MPI_CANCEL
401: #define petsc_mpi_comm_dup_ PETSC_MPI_COMM_DUP
402: #define petsc_mpi_comm_free_ PETSC_MPI_COMM_FREE
403: #define petsc_mpi_get_count_ PETSC_MPI_GET_COUNT
404: #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
405: #define petsc_mpi_initialized_ PETSC_MPI_INITIALIZED
406: #define petsc_mpi_iprobe_ PETSC_MPI_IPROBE
407: #define petsc_mpi_probe_ PETSC_MPI_PROBE
408: #define petsc_mpi_request_free_ PETSC_MPI_REQUEST_FREE
409: #define petsc_mpi_ssend_ PETSC_MPI_SSEND
410: #define petsc_mpi_wait_ PETSC_MPI_WAIT
411: #define petsc_mpi_comm_group_ PETSC_MPI_COMM_GROUP
412: #define petsc_mpi_exscan_ PETSC_MPI_EXSCAN
413: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
414: #define mpiunisetmoduleblock_ mpiunisetmoduleblock
415: #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
416: #define petsc_mpi_init_ petsc_mpi_init
417: #define petsc_mpi_finalize_ petsc_mpi_finalize
418: #define petsc_mpi_comm_size_ petsc_mpi_comm_size
419: #define petsc_mpi_comm_rank_ petsc_mpi_comm_rank
420: #define petsc_mpi_abort_ petsc_mpi_abort
421: #define petsc_mpi_reduce_ petsc_mpi_reduce
422: #define petsc_mpi_allreduce_ petsc_mpi_allreduce
423: #define petsc_mpi_barrier_ petsc_mpi_barrier
424: #define petsc_mpi_bcast_ petsc_mpi_bcast
425: #define petsc_mpi_gather_ petsc_mpi_gather
426: #define petsc_mpi_allgather_ petsc_mpi_allgather
427: #define petsc_mpi_comm_split_ petsc_mpi_comm_split
428: #define petsc_mpi_scan_ petsc_mpi_scan
429: #define petsc_mpi_send_ petsc_mpi_send
430: #define petsc_mpi_recv_ petsc_mpi_recv
431: #define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter
432: #define petsc_mpi_irecv_ petsc_mpi_irecv
433: #define petsc_mpi_isend_ petsc_mpi_isend
434: #define petsc_mpi_sendrecv_ petsc_mpi_sendrecv
435: #define petsc_mpi_test_ petsc_mpi_test
436: #define petsc_mpi_waitall_ petsc_mpi_waitall
437: #define petsc_mpi_waitany_ petsc_mpi_waitany
438: #define petsc_mpi_allgatherv_ petsc_mpi_allgatherv
439: #define petsc_mpi_alltoallv_ petsc_mpi_alltoallv
440: #define petsc_mpi_comm_create_ petsc_mpi_comm_create
441: #define petsc_mpi_address_ petsc_mpi_address
442: #define petsc_mpi_pack_ petsc_mpi_pack
443: #define petsc_mpi_unpack_ petsc_mpi_unpack
444: #define petsc_mpi_pack_size_ petsc_mpi_pack_size
445: #define petsc_mpi_type_struct_ petsc_mpi_type_struct
446: #define petsc_mpi_type_commit_ petsc_mpi_type_commit
447: #define petsc_mpi_wtime_ petsc_mpi_wtime
448: #define petsc_mpi_cancel_ petsc_mpi_cancel
449: #define petsc_mpi_comm_dup_ petsc_mpi_comm_dup
450: #define petsc_mpi_comm_free_ petsc_mpi_comm_free
451: #define petsc_mpi_get_count_ petsc_mpi_get_count
452: #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
453: #define petsc_mpi_initialized_ petsc_mpi_initialized
454: #define petsc_mpi_iprobe_ petsc_mpi_iprobe
455: #define petsc_mpi_probe_ petsc_mpi_probe
456: #define petsc_mpi_request_free_ petsc_mpi_request_free
457: #define petsc_mpi_ssend_ petsc_mpi_ssend
458: #define petsc_mpi_wait_ petsc_mpi_wait
459: #define petsc_mpi_comm_group_ petsc_mpi_comm_group
460: #define petsc_mpi_exscan_ petsc_mpi_exscan
461: #endif
463: #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
464: #define petsc_mpi_init_ petsc_mpi_init__
465: #define petsc_mpi_finalize_ petsc_mpi_finalize__
466: #define petsc_mpi_comm_size_ petsc_mpi_comm_size__
467: #define petsc_mpi_comm_rank_ petsc_mpi_comm_rank__
468: #define petsc_mpi_abort_ petsc_mpi_abort__
469: #define petsc_mpi_reduce_ petsc_mpi_reduce__
470: #define petsc_mpi_allreduce_ petsc_mpi_allreduce__
471: #define petsc_mpi_barrier_ petsc_mpi_barrier__
472: #define petsc_mpi_bcast_ petsc_mpi_bcast__
473: #define petsc_mpi_gather_ petsc_mpi_gather__
474: #define petsc_mpi_allgather_ petsc_mpi_allgather__
475: #define petsc_mpi_comm_split_ petsc_mpi_comm_split__
476: #define petsc_mpi_scan_ petsc_mpi_scan__
477: #define petsc_mpi_send_ petsc_mpi_send__
478: #define petsc_mpi_recv_ petsc_mpi_recv__
479: #define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter__
480: #define petsc_mpi_irecv_ petsc_mpi_irecv__
481: #define petsc_mpi_isend_ petsc_mpi_isend__
482: #define petsc_mpi_sendrecv_ petsc_mpi_sendrecv__
483: #define petsc_mpi_test_ petsc_mpi_test__
484: #define petsc_mpi_waitall_ petsc_mpi_waitall__
485: #define petsc_mpi_waitany_ petsc_mpi_waitany__
486: #define petsc_mpi_allgatherv_ petsc_mpi_allgatherv__
487: #define petsc_mpi_alltoallv_ petsc_mpi_alltoallv__
488: #define petsc_mpi_comm_create_ petsc_mpi_comm_create__
489: #define petsc_mpi_address_ petsc_mpi_address__
490: #define petsc_mpi_pack_ petsc_mpi_pack__
491: #define petsc_mpi_unpack_ petsc_mpi_unpack__
492: #define petsc_mpi_pack_size_ petsc_mpi_pack_size__
493: #define petsc_mpi_type_struct_ petsc_mpi_type_struct__
494: #define petsc_mpi_type_commit_ petsc_mpi_type_commit__
495: #define petsc_mpi_wtime_ petsc_mpi_wtime__
496: #define petsc_mpi_cancel_ petsc_mpi_cancel__
497: #define petsc_mpi_comm_dup_ petsc_mpi_comm_dup__
498: #define petsc_mpi_comm_free_ petsc_mpi_comm_free__
499: #define petsc_mpi_get_count_ petsc_mpi_get_count__
500: #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
501: #define petsc_mpi_initialized_ petsc_mpi_initialized__
502: #define petsc_mpi_iprobe_ petsc_mpi_iprobe__
503: #define petsc_mpi_probe_ petsc_mpi_probe__
504: #define petsc_mpi_request_free_ petsc_mpi_request_free__
505: #define petsc_mpi_ssend_ petsc_mpi_ssend__
506: #define petsc_mpi_wait_ petsc_mpi_wait__
507: #define petsc_mpi_comm_group_ petsc_mpi_comm_group__
508: #define petsc_mpi_exscan_ petsc_mpi_exscan__
509: #endif
511: /* Do not build the Fortran interface if MPI namespace collision is to be avoided */
512: #if defined(PETSC_HAVE_FORTRAN)
514: PETSC_EXTERN void mpiunisetmoduleblock_(void);
516: PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
517: {
518: MPIUNIF_mpi_in_place = f_mpi_in_place;
519: }
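/*
   The Fortran wrappers below all follow the same convention: every argument, including communicator
   and datatype handles, arrives by reference, character arguments carry a trailing hidden
   PETSC_FORTRAN_CHARLEN_T length, and the MPI error code is returned through the final ierr pointer
   instead of as a C return value.
*/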
521: PETSC_EXTERN void petsc_mpi_init_(int *ierr)
522: {
523: mpiunisetmoduleblock_();
524: *ierr = MPI_Init((int*)0, (char***)0);
525: }
527: PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
528: {
529: *ierr = MPI_Finalize();
530: }
532: PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
533: {
534: *size = 1;
535: *ierr = 0;
536: }
538: PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
539: {
540: *rank = 0;
541: *ierr = MPI_SUCCESS;
542: }
544: PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
545: {
546: *newcomm = *comm;
547: *ierr = MPI_SUCCESS;
548: }
550: PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
551: {
552: abort();
553: *ierr = MPI_SUCCESS;
554: }
556: PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
557: {
558: *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
559: }
561: PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
562: {
563: *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
564: }
566: PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
567: {
568: *ierr = MPI_SUCCESS;
569: }
571: PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
572: {
573: *ierr = MPI_SUCCESS;
574: }
576: PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
577: {
578: *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*root,*comm);
579: }
581: PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
582: {
583: *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,rcount,rdatatype,*comm);
584: }
586: PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
587: {
588: *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
589: }
591: PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
592: {
593: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
594: }
596: PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr)
597: {
598: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
599: }
601: PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
602: {
603: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
604: }
606: PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
607: {
608: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
609: }
611: PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
612: {
613: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
614: }
616: PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
617: {
618: *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
619: }
621: PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
622: {
623: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
624: }
626: PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
627: {
628: *ierr = MPI_SUCCESS;
629: }
631: PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
632: {
633: *ierr = MPI_SUCCESS;
634: }
636: PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
637: {
638: *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
639: }
641: PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
642: {
643: *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
644: }
646: PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
647: {
648: *newcomm = *comm;
649: *ierr = MPI_SUCCESS;
650: }
652: PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
653: {
654: *address = (MPI_Aint) ((char *)location);
655: *ierr = MPI_SUCCESS;
656: }
658: PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
659: {
660: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
661: }
663: PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
664: {
665: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
666: }
668: PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
669: {
670: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
671: }
673: PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
674: {
675: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
676: }
678: PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
679: {
680: *ierr = MPI_SUCCESS;
681: }
683: double petsc_mpi_wtime_(void)
684: {
685: return 0.0;
686: }
688: PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
689: {
690: *ierr = MPI_SUCCESS;
691: }
693: PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
694: {
695: *out = *comm;
696: *ierr = MPI_SUCCESS;
697: }
699: PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
700: {
701: *ierr = MPI_SUCCESS;
702: }
704: PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
705: {
706: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
707: }
709: PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
710: {
711: MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
712: *result_len = 9;
713: *ierr = MPI_SUCCESS;
714: }
716: PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
717: {
718: *flag = MPI_was_initialized;
719: *ierr = MPI_SUCCESS;
720: }
722: PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
723: {
724: *ierr = MPI_SUCCESS;
725: }
727: PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
728: {
729: *ierr = MPI_SUCCESS;
730: }
732: PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
733: {
734: *ierr = MPI_SUCCESS;
735: }
737: PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
738: {
739: *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
740: }
742: PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
743: {
744: *ierr = MPI_SUCCESS;
745: }
747: PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
748: {
749: *ierr = MPI_SUCCESS;
750: }
752: PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
753: {
754: *ierr = MPI_SUCCESS;
755: }
757: #endif /* PETSC_HAVE_FORTRAN */
759: #if defined(__cplusplus)
760: }
761: #endif