How would I rewrite C++ code that makes use of MPI calls? For example, this code below:
#include <mpi.h>
#include <stdio.h>

/*
 * Creates a 3-D periodic Cartesian topology over all ranks, prints each
 * rank's coordinates, then exchanges a small buffer with the neighbours
 * along dimensions 0 and 1 and reports the neighbour ranks.
 */
int main(int argc, char *argv[])
{
    int rank, size;
    const int ndims = 3;
    int dims[3]    = {0, 0, 0};   /* let MPI_Dims_create choose the grid */
    int coords[3]  = {0, 0, 0};
    int periods[3] = {1, 1, 1};   /* wrap around in every dimension */
    int reorder = 0;
    /* Payload for the neighbour exchange (was used undeclared before). */
    int buffer[10] = {0}, buffer2[10] = {0};
    MPI_Comm comm;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Dims_create(size, ndims, dims);
    MPI_Cart_create(MPI_COMM_WORLD, ndims, dims, periods, reorder, &comm);

    /* Re-query the rank in the Cartesian communicator (it may differ
     * from the world rank when reorder is nonzero). */
    MPI_Comm_rank(comm, &rank);
    MPI_Cart_coords(comm, rank, ndims, coords);
    printf("Rank %d coordinates are %d %d %d\n",
           rank, coords[0], coords[1], coords[2]);
    fflush(stdout);               /* flush AFTER printing, before the barrier */
    MPI_Barrier(comm);

    int leftrank, rightrank, downrank, uprank;
    /* Shift by -1 along dimension 0: source is "down", destination "up". */
    MPI_Cart_shift(comm, 0, -1, &downrank, &uprank);
    MPI_Sendrecv(buffer, 10, MPI_INT, downrank, 123,
                 buffer2, 10, MPI_INT, uprank, 123, comm, &status);
    /* Shift by -1 along dimension 1: source is "right", destination "left". */
    MPI_Cart_shift(comm, 1, -1, &rightrank, &leftrank);
    MPI_Sendrecv(buffer, 10, MPI_INT, leftrank, 123,
                 buffer2, 10, MPI_INT, rightrank, 123, comm, &status);
    printf("P:%d My neighbors are rightRank: %d downRank:%d leftRank:%d upRank:%d\n",
           rank, rightrank, downrank, leftrank, uprank);

    MPI_Finalize();
    return 0;
}
You can either wrap mpi.h to create a Nim API and then rewrite your program against that API,
or, if you don't want to wrap MPI, you can probably use `emit` for the MPI-specific sections of your code. I wouldn't recommend that solution, though, as I find `emit` not portable.
You could use this far from exhaustive wrapper code of mine as a basis:
# mpi.nim
# Minimal Nim bindings for a handful of MPI types and error handling.
type
  MPI_Comm* {.header: "<mpi.h>".} = object      ## opaque MPI communicator handle
  MPI_Datatype* {.header: "<mpi.h>".} = object  ## opaque MPI datatype handle
  MPI_Op* {.header: "<mpi.h>".} = object        ## opaque MPI reduction-op handle
  MPIError* = object of IOError
    ## Raised by `check` when an MPI call returns a non-success code.
    code*: MPIErrorCode  # the raw MPI error code that triggered the failure
  MPIErrorCode* = distinct cint

# Exported so callers of this module can compare error codes themselves
# (previously unexported, which made `someCode == MPI_SUCCESS` fail to
# compile outside this module).
proc `==`*(a, b: MPIErrorCode): bool {.borrow.}
# Well-known MPI handles and constants, imported from the C header.
# Declared as `var` because their values are provided by the MPI runtime.
var
  MPI_COMM_WORLD* {.header: "<mpi.h>".}: MPI_Comm
  MPI_SUCCESS* {.header: "<mpi.h>".}: MPIErrorCode

# Predefined reduction operations for MPI_Reduce / MPI_Allreduce.
var
  MPI_MAX* {.header: "<mpi.h>".}: MPI_Op     ## maximum
  MPI_MIN* {.header: "<mpi.h>".}: MPI_Op     ## minimum
  MPI_SUM* {.header: "<mpi.h>".}: MPI_Op     ## sum
  MPI_PROD* {.header: "<mpi.h>".}: MPI_Op    ## product
  MPI_LAND* {.header: "<mpi.h>".}: MPI_Op    ## logical and
  MPI_BAND* {.header: "<mpi.h>".}: MPI_Op    ## bit-wise and
  MPI_LOR* {.header: "<mpi.h>".}: MPI_Op     ## logical or
  MPI_BOR* {.header: "<mpi.h>".}: MPI_Op     ## bit-wise or
  MPI_LXOR* {.header: "<mpi.h>".}: MPI_Op    ## logical exclusive or (xor)
  MPI_BXOR* {.header: "<mpi.h>".}: MPI_Op    ## bit-wise exclusive or (xor)
  MPI_MAXLOC* {.header: "<mpi.h>".}: MPI_Op  ## max value and location
  MPI_MINLOC* {.header: "<mpi.h>".}: MPI_Op  ## min value and location

# Predefined element datatypes.
var
  MPI_FLOAT* {.header: "<mpi.h>".}: MPI_Datatype
  MPI_DOUBLE* {.header: "<mpi.h>".}: MPI_Datatype

let
  MPI_NIMFLOAT* = MPI_DOUBLE  ## Nim's default `float` is 64-bit, i.e. a C double
# Thin importc declarations for the MPI entry points used by this wrapper.
# Parameter names follow the MPI standard (fixed the `sendbug` typo so
# named-argument calls work as expected).
proc MPI_Init*(argc: ptr cint, argv: ptr cstringArray): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Finalize*(): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Error_string*(c: MPIErrorCode, s: cstring, len: ptr cint): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Abort*(comm: MPI_Comm, errc: cint) {.importc, header: "<mpi.h>".}
proc MPI_Reduce*(sendbuf: pointer, recvbuf: pointer, count: cint, datatype: MPI_Datatype,
                 op: MPI_Op, root: cint, comm: MPI_Comm): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Allreduce*(sendbuf: pointer, recvbuf: pointer, count: cint, datatype: MPI_Datatype,
                    op: MPI_Op, comm: MPI_Comm): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Scatter*(sendbuf: pointer, sendcount: cint, sendtype: MPI_Datatype,
                  recvbuf: pointer, recvcount: cint, recvtype: MPI_Datatype,
                  root: cint, comm: MPI_Comm): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Comm_rank*(comm: MPI_Comm, rank: ptr cint): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Comm_size*(comm: MPI_Comm, size: ptr cint): MPIErrorCode {.importc, header: "<mpi.h>".}
proc MPI_Barrier*(comm: MPI_Comm): MPIErrorCode {.importc, header: "<mpi.h>".}
proc check*(c: MPIErrorCode) =
  ## Raises `MPIError` (with `code` set to `c`) if `c` is not `MPI_SUCCESS`.
  ## The exception message is the human-readable text from MPI_Error_string.
  if c != MPI_SUCCESS:
    # GC-managed string instead of a raw alloc0 that was never freed.
    # 512 bytes is at least MPI_MAX_ERROR_STRING in common implementations.
    var msg = newString(512)
    var msgLen: cint
    discard MPI_Error_string(c, cstring(msg), addr msgLen)
    msg.setLen(msgLen)
    let e = newException(MPIError, msg)
    e.code = c  # previously the `code` field was declared but never populated
    raise e
Example
import mpi

# Minimal usage example: initialize MPI, print this process's rank, finalize.
var
  argc: cint = 0
  argv: cstringArray

# `check` raises an MPIError exception on any non-success return code.
check MPI_Init(addr argc, addr argv)  # fixed: ',' between arguments, not '.'
var rank: cint
check MPI_Comm_rank(MPI_COMM_WORLD, addr rank)
echo rank
check MPI_Finalize()
It is meant to be compiled with the mpicc wrapper like this: nim c --gcc.exe="mpicc" --gcc.linkerexe="mpicc"
Note: I chose `object` for declaring the MPI API types, but this is a lie to accommodate different MPI implementations (pointers to opaque structs for OpenMPI, C int for MPICH, etc.).