Actual source code: mpi.h
/*
   This is a special set of bindings for uni-processor use of MPI by the PETSc library.

   NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.

   For example,
   * Does not implement send to self.
   * Does not implement attributes correctly.
*/
/*
   The following info is a response to one of the petsc-maint questions
   regarding MPIUNI.

   MPIUNI was developed with the aim of getting PETSc compiled and
   usable in the absence of a full MPI implementation. With this, we
   were able to provide PETSc on Windows and Windows64 even before any MPI
   implementation was available on these platforms. [Or with certain
   compilers - like Borland - that do not have a usable MPI
   implementation.]

   However, providing a sequential, standards-compliant MPI
   implementation is *not* the goal of MPIUNI. The development strategy
   was to make enough changes to it so that the PETSc sources and examples
   compile without errors and run in uni-processor mode. This is
   the reason each function is not documented.

   PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
   interface is also provided - to get PETSc Fortran examples with a
   few MPI calls working.

   One of the optimizations in MPIUNI is to avoid function call
   overhead when possible. Hence most of the C functions are
   implemented as macros. However, the function calls cannot be avoided
   with Fortran usage.

   Most PETSc objects have both sequential and parallel
   implementations, which are separate. For example, we have two types of
   sparse matrix storage formats - SeqAIJ and MPIAIJ. Some MPI
   routines are used in the Seq part, but most of them are used in the
   MPI part. The send/receive calls can be found mostly in the MPI
   part.

   When MPIUNI is used, only the Seq versions of the PETSc objects are
   used, even though the MPI variants of the objects are compiled. Since
   there are no send/receive calls in the Seq variant, PETSc works fine
   with MPIUNI in seq mode.

   The reason some send/receive functions are defined to abort() is to
   detect sections of code that use send/receive functions and get
   executed in sequential mode (which shouldn't happen in the case of
   PETSc).

   A proper implementation of send/receive would involve writing a
   function for each of them. Inside each of these functions, we would
   have to check whether the send is to self or the receive is from self,
   and then do the buffering accordingly (until the receive is called) -
   or, if a nonblocking receive is called, do a copy, etc. Handling
   the buffering aspects might be complicated enough that, in this
   case, a proper implementation of MPI might as well be used. This is
   the reason send to self is not implemented in MPIUNI, and never
   will be.

   Proper implementations of MPI [for example, MPICH & Open MPI] are
   available for most machines. When these packages are available, it is
   generally preferable to use one of them instead of MPIUNI - even if
   the user is using PETSc sequentially.

   - MPIUNI does not support all MPI functions [or functionality].
     Hence it might not work with external packages or user code that
     might have MPI calls in it.

   - MPIUNI is not a standards-compliant implementation for np=1.
     For example, if the user code has send/recv to self, it will
     abort. [There are similar issues with a number of other pieces of
     MPI functionality.] MPICH & Open MPI, however, are correct
     implementations of the MPI standard for np=1.

   - When user code uses multiple MPI-based packages that have their
     own *internal* stubs equivalent to MPIUNI - in sequential mode,
     these multiple implementations of MPI for np=1 invariably conflict
     with each other. The correct thing to do is make all such
     packages use the *same* MPI implementation for np=1. MPICH/Open MPI
     satisfy this requirement correctly [and hence are the correct choice].

   - Using MPICH/Open MPI sequentially should have minimal
     disadvantages. [For example, these binaries can be run without
     mpirun/mpiexec as ./executable, without requiring any extra
     configuration for ssh/rsh/daemons etc.] This should not be a
     reason to avoid these packages for sequential use.

*/
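/*
   A minimal usage sketch (added for exposition; not part of the PETSc
   sources). Code compiled against MPIUNI can still call the basic
   environment-management routines, which are among those actually
   implemented in mpi.c, and which always report a single process:

     #include <mpi.h>     // this MPIUNI header
     #include <stdio.h>

     int main(int argc, char **argv)
     {
       int rank, size;
       MPI_Init(&argc, &argv);
       MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // rank is always 0 here
       MPI_Comm_size(MPI_COMM_WORLD, &size);   // size is always 1 here
       printf("process %d of %d\n", rank, size);
       MPI_Finalize();
       return 0;
     }
*/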
#if !defined(MPIUNI_H)
#define MPIUNI_H

/* Required by abort() in mpi.c & for win64 */
#include <petscconf.h>
#include <stddef.h>

/* This is reproduced from petscsys.h so that mpi.h can be used standalone without first including petscsys.h */
#if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
# define MPIUni_ __declspec(dllexport)
# define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
#elif defined(PETSC_USE_VISIBILITY_CXX) && defined(__cplusplus)
# define MPIUni_ __attribute__((visibility ("default")))
# define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
#elif defined(PETSC_USE_VISIBILITY_C) && !defined(__cplusplus)
# define MPIUni_ __attribute__((visibility ("default")))
# define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
#else
# define MPIUni_
# define MPIUni_PETSC_DLLIMPORT
#endif

#if defined(petsc_EXPORTS)
# define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
#else /* Win32 users need this to import symbols from petsc.dll */
# define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
#endif

#if defined(__cplusplus)
#define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
#else
#define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
#endif

#if defined(__cplusplus)
extern "C" {
#endif
/* MPI_Aint has to be a signed integral type large enough to hold a pointer */
typedef ptrdiff_t MPI_Aint;

/* old 32bit MS compiler does not support long long */
#if defined(PETSC_SIZEOF_LONG_LONG)
typedef long long MPIUNI_INT64;
typedef unsigned long long MPIUNI_UINT64;
#elif defined(PETSC_HAVE___INT64)
typedef _int64 MPIUNI_INT64;
typedef unsigned _int64 MPIUNI_UINT64;
#else
#error "cannot determine MPIUNI_INT64, MPIUNI_UINT64 types"
#endif
/*
   MPIUNI_ARG is used in the macros below only to stop various C/C++ compilers
   from generating warning messages about unused variables while compiling PETSc.
*/
MPIUni_PETSC_EXTERN void *MPIUNI_TMP;
#define MPIUNI_ARG(arg) (MPIUNI_TMP = (void *)(MPI_Aint) (arg))
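/*
   Illustration (added for exposition): thanks to MPIUNI_ARG, a stub such
   as the MPI_Barrier macro defined later in this file,

     MPI_Barrier(comm)

   expands to the comma expression

     (MPIUNI_TMP = (void *)(MPI_Aint)(comm), MPI_SUCCESS)

   so each argument is "used" (silencing unused-variable warnings) and the
   whole call evaluates to MPI_SUCCESS with no function-call overhead.
*/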
#define MPI_IDENT 0
#define MPI_CONGRUENT 1
#define MPI_SIMILAR 2
#define MPI_UNEQUAL 3

#define MPI_BOTTOM ((void *) 0)
#define MPI_IN_PLACE ((void *)-1)

#define MPI_PROC_NULL (-1)
#define MPI_ANY_SOURCE (-2)
#define MPI_ANY_TAG (-1)
#define MPI_UNDEFINED (-32766)

#define MPI_SUCCESS 0
#define MPI_ERR_OTHER 17
#define MPI_ERR_UNKNOWN 18
#define MPI_ERR_INTERN 21
#define MPI_ERR_NOSUPPORT 22

#define MPI_KEYVAL_INVALID 0
#define MPI_TAG_UB 0

#define MPI_MAX_PROCESSOR_NAME 1024
#define MPI_MAX_ERROR_STRING 2056

typedef int MPI_Comm;
#define MPI_COMM_NULL 0
#define MPI_COMM_SELF 1
#define MPI_COMM_WORLD 2
#define MPI_COMM_TYPE_SHARED 1

typedef int MPI_Info;
#define MPI_INFO_NULL 0

typedef struct {int MPI_SOURCE,MPI_TAG,MPI_ERROR;} MPI_Status;
#define MPI_STATUS_IGNORE (MPI_Status *)0
#define MPI_STATUSES_IGNORE (MPI_Status *)0
/* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
/* Any changes here must also be reflected in mpif.h */
typedef int MPI_Datatype;
#define MPI_DATATYPE_NULL 0
#define MPI_PACKED 0

#define MPI_FLOAT (1 << 20 | 1 << 8 | (int)sizeof(float))
#define MPI_DOUBLE (1 << 20 | 1 << 8 | (int)sizeof(double))
#define MPI_LONG_DOUBLE (1 << 20 | 1 << 8 | (int)sizeof(long double))

#define MPI_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
#define MPI_C_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
#define MPI_C_FLOAT_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
#define MPI_DOUBLE_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
#define MPI_C_DOUBLE_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(double))

#define MPI_CHAR (3 << 20 | 1 << 8 | (int)sizeof(char))
#define MPI_BYTE (3 << 20 | 1 << 8 | (int)sizeof(char))
#define MPI_SIGNED_CHAR (3 << 20 | 1 << 8 | (int)sizeof(signed char))
#define MPI_UNSIGNED_CHAR (3 << 20 | 1 << 8 | (int)sizeof(unsigned char))

#define MPI_SHORT (4 << 20 | 1 << 8 | (int)sizeof(short))
#define MPI_INT (4 << 20 | 1 << 8 | (int)sizeof(int))
#define MPI_LONG (4 << 20 | 1 << 8 | (int)sizeof(long))
#define MPI_LONG_LONG (4 << 20 | 1 << 8 | (int)sizeof(MPIUNI_INT64))
#define MPI_LONG_LONG_INT MPI_LONG_LONG
#define MPI_INTEGER8 MPI_LONG_LONG
#define MPI_INT8_T (5 << 20 | 1 << 8 | (int)sizeof(int8_t))
#define MPI_INT16_T (5 << 20 | 1 << 8 | (int)sizeof(int16_t))
#define MPI_INT32_T (5 << 20 | 1 << 8 | (int)sizeof(int32_t))

#define MPI_UNSIGNED_SHORT (5 << 20 | 1 << 8 | (int)sizeof(unsigned short))
#define MPI_UNSIGNED (5 << 20 | 1 << 8 | (int)sizeof(unsigned))
#define MPI_UNSIGNED_LONG (5 << 20 | 1 << 8 | (int)sizeof(unsigned long))
#define MPI_UNSIGNED_LONG_LONG (5 << 20 | 1 << 8 | (int)sizeof(MPIUNI_UINT64))

#define MPI_FLOAT_INT (10 << 20 | 1 << 8 | (int)(sizeof(float) + sizeof(int)))
#define MPI_DOUBLE_INT (11 << 20 | 1 << 8 | (int)(sizeof(double) + sizeof(int)))
#define MPI_LONG_INT (12 << 20 | 1 << 8 | (int)(sizeof(long) + sizeof(int)))
#define MPI_SHORT_INT (13 << 20 | 1 << 8 | (int)(sizeof(short) + sizeof(int)))
#define MPI_2INT (14 << 20 | 1 << 8 | (int)(2*sizeof(int)))
#define MPI_2DOUBLE (15 << 20 | 1 << 8 | (int)(2*sizeof(double)))

/* Fortran datatypes; Jed Brown says they should be defined here */
#define MPI_INTEGER MPI_INT
#define MPI_DOUBLE_PRECISION MPI_DOUBLE
#define MPI_COMPLEX16 MPI_C_DOUBLE_COMPLEX
#define MPI_2DOUBLE_PRECISION MPI_2DOUBLE

#define MPI_ORDER_C 0
#define MPI_ORDER_FORTRAN 1

#define MPI_sizeof_default(datatype) ((((datatype) >> 8) & 0xfff) * ((datatype) & 0xff))
#if defined(PETSC_USE_REAL___FP16)
MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FP16;
#define MPI_sizeof(datatype) (((datatype) == MPIU___FP16) ? (int)(2*sizeof(char)) : MPI_sizeof_default(datatype))
#elif defined(PETSC_USE_REAL___FLOAT128)
MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FLOAT128;
#define MPI_sizeof(datatype) (((datatype) == MPIU___FLOAT128) ? (int)(2*sizeof(double)) : MPI_sizeof_default(datatype))
#else
#define MPI_sizeof(datatype) (MPI_sizeof_default(datatype))
#endif
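/*
   Worked example (added for exposition): MPI_DOUBLE above packs to
   (1 << 20 | 1 << 8 | sizeof(double)), i.e. type-index 1 in bits 20-27,
   count 1 in bits 8-19, and base-bytes sizeof(double) (8 on typical
   machines) in bits 0-7. MPI_sizeof_default() then recovers the size as
   count * base-bytes:

     ((MPI_DOUBLE >> 8) & 0xfff) * (MPI_DOUBLE & 0xff) = 1 * 8 = 8 bytes
*/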
MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);

typedef int MPI_Request;
#define MPI_REQUEST_NULL 0

typedef int MPI_Group;
#define MPI_GROUP_NULL 0
#define MPI_GROUP_EMPTY 0

typedef int MPI_Op;
#define MPI_OP_NULL 0
#define MPI_SUM 1
#define MPI_MAX 2
#define MPI_MIN 3
#define MPI_REPLACE 4
#define MPI_PROD 5
#define MPI_LAND 6
#define MPI_BAND 7
#define MPI_LOR 8
#define MPI_BOR 9
#define MPI_LXOR 10
#define MPI_BXOR 11
#define MPI_MAXLOC 12
#define MPI_MINLOC 13

typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);

typedef int MPI_Errhandler;
#define MPI_ERRHANDLER_NULL 0
#define MPI_ERRORS_RETURN 0
#define MPI_ERRORS_ARE_FATAL 0
#define MPI_ERR_LASTCODE 0x3fffffff
typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...);

/*
  Prototypes of some functions which are implemented in mpi.c
*/
typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
#define MPI_NULL_COPY_FN (MPI_Copy_function*)0
#define MPI_NULL_DELETE_FN (MPI_Delete_function*)0

#define MPI_THREAD_SINGLE 0
#define MPI_THREAD_FUNNELED 1
#define MPI_THREAD_SERIALIZED 2
#define MPI_THREAD_MULTIPLE 3
/*
   To enable linking PETSc+MPIUNI with any other package that might have its
   own MPIUNI (equivalent implementation), we need to avoid using the 'MPI'
   namespace for MPIUNI functions that go into the PETSc library.

   For the C functions below (which get compiled into the PETSc library), we
   map the 'MPI' functions to the 'Petsc_MPI' namespace.

   With Fortran we use a similar mapping - thus requiring the use of the
   C preprocessor with mpif.h.
*/
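/*
   Illustration (added for exposition): given the mappings below, a user
   call such as

     ierr = MPI_Init(&argc, &argv);

   preprocesses to

     ierr = Petsc_MPI_Init(&argc, &argv);

   so the stub compiled into the PETSc library never collides at link time
   with an identically named np=1 stub from another package.
*/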
#define MPI_Abort Petsc_MPI_Abort
#define MPIUni_Abort Petsc_MPIUni_Abort
#define MPI_Attr_get Petsc_MPI_Attr_get
#define MPI_Keyval_free Petsc_MPI_Keyval_free
#define MPI_Attr_put Petsc_MPI_Attr_put
#define MPI_Attr_delete Petsc_MPI_Attr_delete
#define MPI_Keyval_create Petsc_MPI_Keyval_create
#define MPI_Comm_free Petsc_MPI_Comm_free
#define MPI_Comm_dup Petsc_MPI_Comm_dup
#define MPI_Comm_create Petsc_MPI_Comm_create
#define MPI_Init Petsc_MPI_Init
#define MPI_Init_thread Petsc_MPI_Init_thread
#define MPI_Query_thread Petsc_MPI_Query_thread
#define MPI_Finalize Petsc_MPI_Finalize
#define MPI_Initialized Petsc_MPI_Initialized
#define MPI_Finalized Petsc_MPI_Finalized
#define MPI_Comm_size Petsc_MPI_Comm_size
#define MPI_Comm_rank Petsc_MPI_Comm_rank
#define MPI_Wtime Petsc_MPI_Wtime
#define MPI_Type_get_envelope Petsc_MPI_Type_get_envelope
#define MPI_Type_get_contents Petsc_MPI_Type_get_contents
#define MPI_Add_error_class Petsc_MPI_Add_error_class
#define MPI_Add_error_code Petsc_MPI_Add_error_code

/* identical C bindings */
#define MPI_Comm_copy_attr_function MPI_Copy_function
#define MPI_Comm_delete_attr_function MPI_Delete_function
#define MPI_COMM_NULL_COPY_FN MPI_NULL_COPY_FN
#define MPI_COMM_NULL_DELETE_FN MPI_NULL_DELETE_FN
#define MPI_Comm_create_keyval Petsc_MPI_Keyval_create
#define MPI_Comm_free_keyval Petsc_MPI_Keyval_free
#define MPI_Comm_get_attr Petsc_MPI_Attr_get
#define MPI_Comm_set_attr Petsc_MPI_Attr_put
#define MPI_Comm_delete_attr Petsc_MPI_Attr_delete
MPIUni_PETSC_EXTERN int MPIUni_Abort(MPI_Comm,int);
MPIUni_PETSC_EXTERN int MPI_Abort(MPI_Comm,int);
MPIUni_PETSC_EXTERN int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
MPIUni_PETSC_EXTERN int MPI_Keyval_free(int*);
MPIUni_PETSC_EXTERN int MPI_Attr_put(MPI_Comm,int,void *);
MPIUni_PETSC_EXTERN int MPI_Attr_delete(MPI_Comm,int);
MPIUni_PETSC_EXTERN int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
MPIUni_PETSC_EXTERN int MPI_Comm_free(MPI_Comm*);
MPIUni_PETSC_EXTERN int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
MPIUni_PETSC_EXTERN int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
MPIUni_PETSC_EXTERN int MPI_Init(int *, char ***);
MPIUni_PETSC_EXTERN int MPI_Init_thread(int *, char ***, int, int *);
MPIUni_PETSC_EXTERN int MPI_Query_thread(int *);
MPIUni_PETSC_EXTERN int MPI_Finalize(void);
MPIUni_PETSC_EXTERN int MPI_Initialized(int*);
MPIUni_PETSC_EXTERN int MPI_Finalized(int*);
MPIUni_PETSC_EXTERN int MPI_Comm_size(MPI_Comm,int*);
MPIUni_PETSC_EXTERN int MPI_Comm_rank(MPI_Comm,int*);
MPIUni_PETSC_EXTERN double MPI_Wtime(void);

MPIUni_PETSC_EXTERN int MPI_Type_get_envelope(MPI_Datatype,int*,int*,int*,int*);
MPIUni_PETSC_EXTERN int MPI_Type_get_contents(MPI_Datatype,int,int,int,int*,MPI_Aint*,MPI_Datatype*);
MPIUni_PETSC_EXTERN int MPI_Add_error_class(int*);
MPIUni_PETSC_EXTERN int MPI_Add_error_code(int,int*);
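/*
   Usage sketch (added for exposition; 'myval' is a hypothetical user
   variable). The attribute-caching routines declared above are real
   functions in mpi.c; a typical sequence is

     int keyval, *val, flag, myval = 42;
     MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, &keyval, NULL);
     MPI_Attr_put(MPI_COMM_WORLD, keyval, (void *)&myval);
     MPI_Attr_get(MPI_COMM_WORLD, keyval, &val, &flag);   // flag != 0 if set

   Keep in mind the caveat at the top of this file: attributes are not
   implemented fully correctly under MPIUNI.
*/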
/*
   Routines we have replaced with macros that do nothing.
   Some return error codes; others return success.
*/

typedef int MPI_Fint;
#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
#define MPI_Type_f2c(type) (MPI_Datatype)(type)
#define MPI_Type_c2f(type) (MPI_Fint)(type)
#define MPI_Op_f2c(op) (MPI_Op)(op)
#define MPI_Op_c2f(op) (MPI_Fint)(op)
#define MPI_Send(buf,count,datatype,dest,tag,comm) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(source),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Get_count(status,datatype,count) \
  (MPIUNI_ARG(status),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(count),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Buffer_attach(buffer,size) \
  (MPIUNI_ARG(buffer),\
   MPIUNI_ARG(size),\
   MPI_SUCCESS)
#define MPI_Buffer_detach(buffer,size) \
  (MPIUNI_ARG(buffer),\
   MPIUNI_ARG(size),\
   MPI_SUCCESS)
#define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(source),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Wait(request,status) \
  (MPIUNI_ARG(request),\
   MPIUNI_ARG(status),\
   MPI_SUCCESS)
#define MPI_Test(request,flag,status) \
  (MPIUNI_ARG(request),\
   MPIUNI_ARG(status),\
   *(flag) = 0,\
   MPI_SUCCESS)
#define MPI_Request_free(request) \
  (MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Waitany(count,array_of_requests,index,status) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_requests),\
   MPIUNI_ARG(status),\
   (*(status)).MPI_SOURCE = 0,\
   *(index) = 0,\
   MPI_SUCCESS)
#define MPI_Testany(a,b,c,d,e) \
  (MPIUNI_ARG(a),\
   MPIUNI_ARG(b),\
   MPIUNI_ARG(c),\
   MPIUNI_ARG(d),\
   MPIUNI_ARG(e),\
   MPI_SUCCESS)
#define MPI_Waitall(count,array_of_requests,array_of_statuses) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_requests),\
   MPIUNI_ARG(array_of_statuses),\
   MPI_SUCCESS)
#define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_requests),\
   MPIUNI_ARG(flag),\
   MPIUNI_ARG(array_of_statuses),\
   MPI_SUCCESS)
#define MPI_Waitsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
  (MPIUNI_ARG(incount),\
   MPIUNI_ARG(array_of_requests),\
   MPIUNI_ARG(outcount),\
   MPIUNI_ARG(array_of_indices),\
   MPIUNI_ARG(array_of_statuses),\
   MPI_SUCCESS)
#define MPI_Comm_group(comm,group) \
  (MPIUNI_ARG(comm),\
   *(group) = 1,\
   MPI_SUCCESS)
#define MPI_Group_excl(group,n,ranks,newgroup) \
  (MPIUNI_ARG(group),\
   MPIUNI_ARG(n),\
   MPIUNI_ARG(ranks),\
   MPIUNI_ARG(newgroup),\
   MPI_SUCCESS)
#define MPI_Group_incl(group,n,ranks,newgroup) \
  (MPIUNI_ARG(group),\
   MPIUNI_ARG(n),\
   MPIUNI_ARG(ranks),\
   MPIUNI_ARG(newgroup),\
   MPI_SUCCESS)
#define MPI_Testsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
  (MPIUNI_ARG(incount),\
   MPIUNI_ARG(array_of_requests),\
   MPIUNI_ARG(outcount),\
   MPIUNI_ARG(array_of_indices),\
   MPIUNI_ARG(array_of_statuses),\
   MPI_SUCCESS)
#define MPI_Iprobe(source,tag,comm,flag,status) \
  (MPIUNI_ARG(source),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   *(flag) = 0,\
   MPIUNI_ARG(status),\
   MPI_SUCCESS)
#define MPI_Probe(source,tag,comm,status) \
  (MPIUNI_ARG(source),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(status),\
   MPI_SUCCESS)
#define MPI_Cancel(request) \
  (MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Test_cancelled(status,flag) \
  (MPIUNI_ARG(status),\
   *(flag) = 0,\
   MPI_SUCCESS)
#define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(source),\
   MPIUNI_ARG(tag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Start(request) \
  (MPIUNI_ARG(request),\
   MPI_SUCCESS)
#define MPI_Startall(count,array_of_requests) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_requests),\
   MPI_SUCCESS)
#define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
                     dest,sendtag,recvbuf,recvcount,\
                     recvtype,source,recvtag,\
                     comm,status) \
  (MPIUNI_ARG(dest),\
   MPIUNI_ARG(sendtag),\
   MPIUNI_ARG(recvcount),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(source),\
   MPIUNI_ARG(recvtag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(status),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
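/*
   Note with example (added for exposition): on one process a send/receive
   pair degenerates to a local copy, so

     double a[3] = {1, 2, 3}, b[3];
     MPI_Sendrecv(a, 3, MPI_DOUBLE, 0, 0,
                  b, 3, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

   simply moves 3*sizeof(double) bytes from a to b through MPIUNI_Memcpy(),
   as the macro above shows.
*/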
#define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
                             source,recvtag,comm,status) \
  (MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(dest),\
   MPIUNI_ARG(sendtag),\
   MPIUNI_ARG(source),\
   MPIUNI_ARG(recvtag),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(status),\
   MPI_SUCCESS)

#define MPI_COMBINER_NAMED 0
#define MPI_COMBINER_DUP 1
#define MPI_COMBINER_CONTIGUOUS 2
/* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
#define MPI_Type_dup(oldtype,newtype) \
  (*(newtype) = oldtype, MPI_SUCCESS)
#define MPI_Type_contiguous(count,oldtype,newtype) \
  (*(newtype) = (MPI_COMBINER_CONTIGUOUS<<28)|((oldtype)&0x0ff00000)|(((oldtype)>>8&0xfff)*(count))<<8|((oldtype)&0xff), MPI_SUCCESS)
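/*
   Worked example (added for exposition): MPI_Type_contiguous() above
   re-packs the fields per the packing scheme. For

     MPI_Datatype t;
     MPI_Type_contiguous(4, MPI_INT, &t);

   the new type t keeps MPI_INT's type-index (bits 20-27) and base-bytes
   (sizeof(int), bits 0-7), stores count 1*4 = 4 in bits 8-19, and sets
   the combiner bits to MPI_COMBINER_CONTIGUOUS, so MPI_sizeof(t) yields
   4*sizeof(int).
*/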
#define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(blocklength),\
   MPIUNI_ARG(stride),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(blocklength),\
   MPIUNI_ARG(stride),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_indexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_blocklengths),\
   MPIUNI_ARG(array_of_displacements),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_hindexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_blocklengths),\
   MPIUNI_ARG(array_of_displacements),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_struct(count,array_of_blocklengths,array_of_displacements,array_of_types,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(array_of_blocklengths),\
   MPIUNI_ARG(array_of_displacements),\
   MPIUNI_ARG(array_of_types),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Address(location,address) \
  (*(address) = (MPI_Aint)((char *)(location)), MPI_SUCCESS)
#define MPI_Type_size(datatype,size) (*(size) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_lb(datatype,lb) (MPIUNI_ARG(datatype), *(lb) = 0, MPI_SUCCESS)
#define MPI_Type_ub(datatype,ub) (*(ub) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_extent(datatype,extent) \
  (*(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_get_extent(datatype,lb,extent) \
  (*(lb) = 0, *(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_commit(datatype) (MPIUNI_ARG(datatype), MPI_SUCCESS)
#define MPI_Type_free(datatype) (*(datatype) = MPI_DATATYPE_NULL, MPI_SUCCESS)
#define MPI_Get_elements(status,datatype,count) \
  (MPIUNI_ARG(status),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(count),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Pack(inbuf,incount,datatype,outbuf,outsize,position,comm) \
  (MPIUNI_ARG(inbuf),\
   MPIUNI_ARG(incount),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(outbuf),\
   MPIUNI_ARG(outsize),\
   MPIUNI_ARG(position),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Unpack(inbuf,insize,position,outbuf,outcount,datatype,comm) \
  (MPIUNI_ARG(inbuf),\
   MPIUNI_ARG(insize),\
   MPIUNI_ARG(position),\
   MPIUNI_ARG(outbuf),\
   MPIUNI_ARG(outcount),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Pack_size(incount,datatype,comm,size) \
  (MPIUNI_ARG(incount),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(size),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Barrier(comm) \
  (MPIUNI_ARG(comm),\
   MPI_SUCCESS)
#define MPI_Bcast(buffer,count,datatype,root,comm) \
  (MPIUNI_ARG(buffer),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(root),\
   MPIUNI_ARG(comm),\
   MPI_SUCCESS)
#define MPI_Gather(sendbuf,sendcount,sendtype,\
                   recvbuf,recvcount,recvtype,\
                   root,comm) \
  (MPIUNI_ARG(recvcount),\
   MPIUNI_ARG(root),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Gatherv(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcounts,displs,\
                    recvtype,root,comm) \
  (MPIUNI_ARG(recvcounts),\
   MPIUNI_ARG(displs),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(root),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Scatter(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcount,recvtype,\
                    root,comm) \
  (MPIUNI_ARG(sendcount),\
   MPIUNI_ARG(sendtype),\
   MPIUNI_ARG(recvbuf),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(root),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
#define MPI_Scatterv(sendbuf,sendcounts,displs,\
                     sendtype,recvbuf,recvcount,\
                     recvtype,root,comm) \
  (MPIUNI_ARG(displs),\
   MPIUNI_ARG(sendtype),\
   MPIUNI_ARG(sendcounts),\
   MPIUNI_ARG(root),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
#define MPI_Allgather(sendbuf,sendcount,sendtype,\
                      recvbuf,recvcount,recvtype,comm) \
  (MPIUNI_ARG(recvcount),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
                       recvbuf,recvcounts,displs,recvtype,comm) \
  (MPIUNI_ARG(recvcounts),\
   MPIUNI_ARG(displs),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Alltoall(sendbuf,sendcount,sendtype,\
                     recvbuf,recvcount,recvtype,comm) \
  (MPIUNI_ARG(recvcount),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Alltoallv(sendbuf,sendcounts,sdispls,sendtype,\
                      recvbuf,recvcounts,rdispls,recvtype,comm) \
  (MPIUNI_ARG(sendbuf),\
   MPIUNI_ARG(sendcounts),\
   MPIUNI_ARG(sdispls),\
   MPIUNI_ARG(sendtype),\
   MPIUNI_ARG(recvbuf),\
   MPIUNI_ARG(recvcounts),\
   MPIUNI_ARG(rdispls),\
   MPIUNI_ARG(recvtype),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Alltoallw(sendbuf,sendcounts,sdispls,sendtypes,\
                      recvbuf,recvcounts,rdispls,recvtypes,comm) \
  (MPIUNI_ARG(sendbuf),\
   MPIUNI_ARG(sendcounts),\
   MPIUNI_ARG(sdispls),\
   MPIUNI_ARG(sendtypes),\
   MPIUNI_ARG(recvbuf),\
   MPIUNI_ARG(recvcounts),\
   MPIUNI_ARG(rdispls),\
   MPIUNI_ARG(recvtypes),\
   MPIUNI_ARG(comm),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Reduce(sendbuf,recvbuf,count,datatype,op,root,comm) \
  (MPIUNI_ARG(op),\
   MPIUNI_ARG(root),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
#define MPI_Allreduce(sendbuf,recvbuf,count,datatype,op,comm) \
  (MPIUNI_ARG(op),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
#define MPI_Iallreduce(sendbuf,recvbuf,count,datatype,op,comm,request) \
  (MPIUNI_ARG(op),\
   MPIUNI_ARG(comm),\
   MPIUNI_ARG(request),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
#define MPI_Scan(sendbuf,recvbuf,count,datatype,op,comm) \
  (MPIUNI_ARG(op),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
#define MPI_Exscan(sendbuf,recvbuf,count,datatype,op,comm) \
  (MPIUNI_ARG(sendbuf),\
   MPIUNI_ARG(recvbuf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(op),\
   MPIUNI_ARG(comm),\
   MPI_SUCCESS)
#define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,datatype,op,comm) \
  (MPIUNI_ARG(op),\
   MPIUNI_ARG(comm),\
   MPIUNI_Memcpy(recvbuf,sendbuf,(*recvcounts)*MPI_sizeof(datatype)))
#define MPI_Op_create(function,commute,op) \
  (MPIUNI_ARG(function),\
   MPIUNI_ARG(commute),\
   MPIUNI_ARG(op),\
   MPI_SUCCESS)
#define MPI_Op_free(op) \
  (*(op) = MPI_OP_NULL, MPI_SUCCESS)
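/*
   Example (added for exposition; 'local' is a hypothetical user variable):
   with a single process every reduction is the identity, so

     long local = 42, sum;
     MPI_Allreduce(&local, &sum, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD);

   is implemented by the macros above as a plain copy of 1*sizeof(long)
   bytes from &local to &sum; the op argument (MPI_SUM here) is only
   "used" through MPIUNI_ARG().
*/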
#define MPI_Group_size(group,size) \
  (MPIUNI_ARG(group),\
   *(size) = 1,\
   MPI_SUCCESS)
#define MPI_Group_rank(group,rank) \
  (MPIUNI_ARG(group),\
   *(rank) = 0,\
   MPI_SUCCESS)
#define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
  (MPIUNI_ARG(group1),\
   MPIUNI_ARG(group2),\
   MPIUNI_Memcpy((ranks2),(ranks1),(n)*MPI_sizeof(MPI_INT)))
#define MPI_Group_compare(group1,group2,result) \
  (MPIUNI_ARG(group1),\
   MPIUNI_ARG(group2),\
   *(result) = 1,\
   MPI_SUCCESS)
#define MPI_Group_union(group1,group2,newgroup) \
  (MPIUNI_ARG(group1),\
   MPIUNI_ARG(group2),\
   *(newgroup) = 1,\
   MPI_SUCCESS)
#define MPI_Group_intersection(group1,group2,newgroup) \
  (MPIUNI_ARG(group1),\
   MPIUNI_ARG(group2),\
   *(newgroup) = 1,\
   MPI_SUCCESS)
#define MPI_Group_difference(group1,group2,newgroup) \
  (MPIUNI_ARG(group1),\
   MPIUNI_ARG(group2),\
   *(newgroup) = MPI_GROUP_EMPTY,\
   MPI_SUCCESS)
#define MPI_Group_range_incl(group,n,ranges,newgroup) \
  (MPIUNI_ARG(group),\
   MPIUNI_ARG(n),\
   MPIUNI_ARG(ranges),\
   *(newgroup) = 1,\
   MPI_SUCCESS)
#define MPI_Group_range_excl(group,n,ranges,newgroup) \
  (MPIUNI_ARG(group),\
   MPIUNI_ARG(n),\
   MPIUNI_ARG(ranges),\
   *(newgroup) = MPI_GROUP_EMPTY,\
   MPI_SUCCESS)
#define MPI_Group_free(group) \
  (*(group) = MPI_GROUP_NULL, MPI_SUCCESS)

#define MPI_Comm_compare(comm1,comm2,result) \
  (MPIUNI_ARG(comm1),\
   MPIUNI_ARG(comm2),\
   *(result) = MPI_IDENT,\
   MPI_SUCCESS)
#define MPI_Comm_split(comm,color,key,newcomm) \
  (MPIUNI_ARG(color),\
   MPIUNI_ARG(key),\
   MPI_Comm_dup(comm,newcomm))
#define MPI_Comm_split_type(comm,color,key,info,newcomm) \
  (MPIUNI_ARG(color),\
   MPIUNI_ARG(key),\
   MPIUNI_ARG(info),\
   MPI_Comm_dup(comm,newcomm))
#define MPI_Comm_test_inter(comm,flag) (*(flag) = 1, MPI_SUCCESS)
#define MPI_Comm_remote_size(comm,size) (*(size) = 1, MPI_SUCCESS)
#define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
#define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
                             remote_leader,tag,newintercomm) MPI_SUCCESS
#define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
#define MPI_Topo_test(comm,flag) MPI_SUCCESS
#define MPI_Cart_create(comm_old,ndims,dims,periods,\
                        reorder,comm_cart) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Dims_create(nnodes,ndims,dims) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_create(comm,a,b,c,d,e) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graphdims_get(comm,nnodes,nedges) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_get(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cartdim_get(comm,ndims) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
  MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_rank(comm,coords,rank) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_coords(comm,rank,maxdims,coords) \
  MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
  MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
  MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
  MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_sub(comm,remain_dims,newcomm) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_map(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)

#define MPI_Get_processor_name(name,result_len) \
  (*(result_len) = 9, MPIUNI_Memcpy(name,"localhost",10*MPI_sizeof(MPI_CHAR)))
#define MPI_Errhandler_create(function,errhandler) \
  (MPIUNI_ARG(function),\
   *(errhandler) = MPI_ERRORS_RETURN,\
   MPI_SUCCESS)
#define MPI_Errhandler_set(comm,errhandler) \
  (MPIUNI_ARG(comm),\
   MPIUNI_ARG(errhandler),\
   MPI_SUCCESS)
#define MPI_Errhandler_get(comm,errhandler) \
  (MPIUNI_ARG(comm),\
   *(errhandler) = MPI_ERRORS_RETURN,\
   MPI_SUCCESS)
#define MPI_Errhandler_free(errhandler) \
  (*(errhandler) = MPI_ERRHANDLER_NULL,\
   MPI_SUCCESS)
#define MPI_Error_string(errorcode,string,result_len) \
  (MPIUNI_ARG(errorcode),\
   ((errorcode) == MPI_ERR_NOSUPPORT) ? \
     (*(result_len) = 35, MPIUNI_Memcpy(string,"MPI error, not supported by MPI-uni",35*MPI_sizeof(MPI_CHAR))) : \
     (*(result_len) = 9, MPIUNI_Memcpy(string,"MPI error",9*MPI_sizeof(MPI_CHAR))))
#define MPI_Error_class(errorcode,errorclass) \
  (*(errorclass) = (errorcode), MPI_SUCCESS)
#define MPI_Wtick() 1.0
#define MPI_Pcontrol(level) MPI_SUCCESS
/* MPI-IO additions */

typedef int MPI_File;
#define MPI_FILE_NULL 0

typedef int MPI_Offset;

#define MPI_MODE_RDONLY 0
#define MPI_MODE_WRONLY 0
#define MPI_MODE_CREATE 0

#define MPI_File_open(comm,filename,amode,info,mpi_fh) \
  (MPIUNI_ARG(comm),\
   MPIUNI_ARG(filename),\
   MPIUNI_ARG(amode),\
   MPIUNI_ARG(info),\
   MPIUNI_ARG(mpi_fh),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_close(mpi_fh) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(disp),\
   MPIUNI_ARG(etype),\
   MPIUNI_ARG(filetype),\
   MPIUNI_ARG(datarep),\
   MPIUNI_ARG(info),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_write_at(mpi_fh,off,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(off),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_at(mpi_fh,off,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(off),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_write_at_all(mpi_fh,off,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(off),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_at_all(mpi_fh,off,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(off),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

/* called from PetscInitialize() - so return success */
#define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
  (MPIUNI_ARG(name),\
   MPIUNI_ARG(read_conv_fn),\
   MPIUNI_ARG(write_conv_fn),\
   MPIUNI_ARG(extent_fn),\
   MPIUNI_ARG(state),\
   MPI_SUCCESS)

#define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
  (MPIUNI_ARG(ndims),\
   MPIUNI_ARG(array_of_sizes),\
   MPIUNI_ARG(array_of_subsizes),\
   MPIUNI_ARG(array_of_starts),\
   MPIUNI_ARG(order),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_Type_create_resized(oldtype,lb,extent,newtype) \
  (MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(lb),\
   MPIUNI_ARG(extent),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_Type_create_indexed_block(count,blocklength,array_of_displacements,oldtype,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(blocklength),\
   MPIUNI_ARG(array_of_displacements),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))
#if defined(__cplusplus)
}
#endif
#endif /* MPIUNI_H */