Problem with PYBIND11_MODULE

Hi all,

I have this piece of code:

from dolfin import MPI, compile_cpp_code

def distribution(number):
    "Get distribution of number on all processes"
    if not hasattr(distribution, "cpp_module"):
        cpp_code = '''
        #include <vector> 
        #include "mpi.h"
        #include <dolfin.h>
        #include <pybind11/pybind11.h>
        #include <pybind11/stl.h>
        #include <pybind11/complex.h>
        #include <pybind11/functional.h>
        #include <pybind11/chrono.h>
        typedef unsigned int uint; 
        namespace py = pybind11;
        namespace dolfin {
            std::vector<unsigned int> distribution(const MPI_Comm mpi_comm, int number)
            {
                // Variables to help in synchronization
                int num_processes, this_process;
                MPI_Comm_size(mpi_comm, &num_processes);
                MPI_Comm_rank(mpi_comm, &this_process);

                std::vector<uint> distribution(num_processes);
                for(int i=0; i<num_processes; i++) {
                    if(i==this_process) {
                        distribution[i] = number;
                    }
                    MPI_Barrier(mpi_comm); 
                    MPI_Bcast(&distribution[0], distribution.size(), MPI_UNSIGNED, i, mpi_comm); 
                }
                return distribution;
            }
        }
        PYBIND11_MODULE(SIGNATURE, m) {
                m.def("distribution", &dolfin::distribution, py::arg("mpi_comm"), py::arg("number"));
        }
        '''
        distribution.cpp_module = compile_cpp_code(cpp_code)
    cpp_module = distribution.cpp_module
    return cpp_module.distribution(MPI.comm_world, number)

Whenever I call it with an integer, say distribution(200), it gives me the following error:

TypeError: distribution(): incompatible function arguments. The following argument types are supported:
    1. (mpi_comm: int, number: int) -> List[int]

Invoked with: <mpi4py.MPI.Intracomm object at 0x7f1b07286fa8>, 200

My guess is that something is wrong with the PYBIND11_MODULE part, but I am not good with C++ and have never worked with pybind11 before, so any help here would be much appreciated.
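Looking at the error again, the generated binding advertises mpi_comm as a plain int (my MPI implementation apparently typedefs MPI_Comm to int), while the call passes an mpi4py communicator object. A minimal illustration of the mismatch, assuming mpi4py is installed:

from mpi4py import MPI

comm = MPI.COMM_WORLD
print(type(comm))             # <class 'mpi4py.MPI.Intracomm'>
print(isinstance(comm, int))  # False, which is why pybind11 rejects the call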

Best,

See for instance:


which uses MPICommWrapper.h to send an MPI communicator to C++.

I guess you could also have a look at:

Thank you dokken.
This function was part of some code I used to work around the problem of submeshes and MPI. But the code was very old, I had to change a lot of deprecated functions, and I think I confused myself too much in the process. Is there a newer FEniCS feature that can help with the issue of submeshes and MPI? I use FEniCS 2019.

Also, I created this based on your suggestions.

The filename is MPICommWrapper.h and it goes like this:

#ifndef __MPI_COMM_WRAPPER_H
#define __MPI_COMM_WRAPPER_H

//#include <dolfin/common/MPI.h>
#include "mpi.h"

namespace dolfin_wrappers
{

  /// This class wraps the MPI_Comm type for use in the pybind11
  /// generation of python wrappers. MPI_Comm is either a pointer or
  /// an int (MPICH vs OpenMPI) and this cannot be wrapped in a type
  /// safe way with pybind11.

  class MPICommWrapper
  {
  public:

    MPICommWrapper() : _comm(MPI_COMM_NULL) {}

    /// Wrap a MPI_Comm object
    MPICommWrapper(MPI_Comm comm) : _comm(comm) {}

    /// Assignment operator
    MPICommWrapper& operator=(const MPI_Comm comm){
        this->_comm = comm;
        return *this;
    }

    /// Get the underlying MPI communicator
    MPI_Comm get() const{
        return _comm;
    }

  private:

    // The underlying communicator
    MPI_Comm _comm;

  };
}

#endif // __MPI_COMM_WRAPPER_H
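As far as I understand, the wrapper class by itself does not tell pybind11 how to construct an MPICommWrapper from an mpi4py communicator; that conversion needs a pybind11 type caster (DOLFIN ships one alongside its Python wrappers). Below is a rough, hypothetical sketch of such a caster, assuming the mpi4py development headers (mpi4py/mpi4py.h) are available; it is illustrative, not DOLFIN's actual code:

// Hypothetical type caster: converts between mpi4py communicators and
// MPICommWrapper. Assumes <mpi4py/mpi4py.h> is on the include path.
#include <pybind11/pybind11.h>
#include <mpi4py/mpi4py.h>
#include "MPICommWrapper.h"

namespace pybind11
{
namespace detail
{
  template <> struct type_caster<dolfin_wrappers::MPICommWrapper>
  {
  public:
    PYBIND11_TYPE_CASTER(dolfin_wrappers::MPICommWrapper, _("MPICommWrapper"));

    // Python -> C++: extract the raw MPI_Comm from an mpi4py Comm object
    bool load(handle src, bool)
    {
      if (import_mpi4py() < 0)
        return false;                  // mpi4py not importable
      MPI_Comm* comm = PyMPIComm_Get(src.ptr());
      if (!comm)
      {
        PyErr_Clear();                 // not an mpi4py communicator
        return false;
      }
      value = dolfin_wrappers::MPICommWrapper(*comm);
      return true;
    }

    // C++ -> Python: wrap the raw MPI_Comm in a new mpi4py Comm object
    static handle cast(dolfin_wrappers::MPICommWrapper src,
                       return_value_policy, handle)
    {
      return handle(PyMPIComm_New(src.get()));
    }
  };
}
}

With a caster like this visible to the compiled module, pybind11 should be able to accept MPI.COMM_WORLD directly wherever an MPICommWrapper parameter appears.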

Then I changed my first code to:

from mpi4py import MPI
from dolfin import compile_cpp_code

def distribution(number):
    "Get distribution of number on all processes"
    if not hasattr(distribution, "cpp_module"):
        cpp_code = '''
        #include <vector> 
        #include "mpi.h"
        #include <dolfin.h>
        #include <pybind11/pybind11.h>
        #include <pybind11/stl.h>
        #include <pybind11/complex.h>
        #include <pybind11/functional.h>
        #include <pybind11/chrono.h>
        #include "/home/nm24a/sample2/MPICommWrapper.h"
        using namespace dolfin_wrappers; 
        typedef unsigned int uint; 
        namespace py = pybind11;
        namespace dolfin {
            std::vector<unsigned int> distribution(const MPICommWrapper mpi_comm, int number)
            {
                // Variables to help in synchronization
                int num_processes, this_process;
                MPI_Comm_size(mpi_comm, &num_processes);
                MPI_Comm_rank(mpi_comm, &this_process);

                std::vector<uint> distribution(num_processes);
                for(int i=0; i<num_processes; i++) {
                    if(i==this_process) {
                        distribution[i] = number;
                    }
                    MPI_Barrier(mpi_comm); 
                    MPI_Bcast(&distribution[0], distribution.size(), MPI_UNSIGNED, i, mpi_comm); 
                }
                return distribution;
            }
        }
        PYBIND11_MODULE(SIGNATURE, m) {
                m.def("distribution", &dolfin::distribution, py::arg("mpi_comm"), py::arg("number"));
        }
        '''
        distribution.cpp_module = compile_cpp_code(cpp_code)
    cpp_module = distribution.cpp_module
#    assert isinstance(dolfin.MPI.comm_world, dolfin.cpp.MPICommWrapper)
    return cpp_module.distribution(MPI.COMM_WORLD, number)

Now I receive this error:

/tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp: In function 'std::vector<unsigned int> dolfin::distribution(dolfin_wrappers::MPICommWrapper, int)':
/tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp:19:55: error: cannot convert 'const dolfin_wrappers::MPICommWrapper' to 'MPI_Comm {aka int}' for argument '1' to 'int MPI_Comm_size(MPI_Comm, int*)'
                 MPI_Comm_size(mpi_comm, &num_processes);
                                                       ^
/tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp:20:54: error: cannot convert 'const dolfin_wrappers::MPICommWrapper' to 'MPI_Comm {aka int}' for argument '1' to 'int MPI_Comm_rank(MPI_Comm, int*)'
                 MPI_Comm_rank(mpi_comm, &this_process);
                                                      ^
/tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp:27:41: error: cannot convert 'const dolfin_wrappers::MPICommWrapper' to 'MPI_Comm {aka int}' for argument '1' to 'int MPI_Barrier(MPI_Comm)'
                     MPI_Barrier(mpi_comm); 
                                         ^
In file included from /usr/local/petsc-32/include/petscsys.h:1459:0,
                 from /usr/local/include/dolfin/common/types.h:24,
                 from /usr/local/include/dolfin/common/dolfin_common.h:9,
                 from /usr/local/include/dolfin.h:6,
                 from /tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp:4:
/tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp:28:21: error: cannot convert 'const dolfin_wrappers::MPICommWrapper' to 'MPI_Comm {aka int}' for argument '1' to 'int PetscMPIParallelComm(MPI_Comm)'
                     MPI_Bcast(&distribution[0], distribution.size(), MPI_UNSIGNED, i, mpi_comm); 
                     ^
/tmp/tmpo5ypgwrh/dolfin_cpp_module_49d32047546e38ba663920023348800b.cpp:28:21: error: cannot convert 'const dolfin_wrappers::MPICommWrapper' to 'MPI_Comm {aka int}' for argument '5' to 'int MPI_Bcast(void*, int, MPI_Datatype, int, MPI_Comm)'
                     MPI_Bcast(&distribution[0], distribution.size(), MPI_UNSIGNED, i, mpi_comm); 
                     ^
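In case it helps anyone reading along: these errors are all the same complaint, namely that MPICommWrapper is not implicitly convertible to MPI_Comm. Since the wrapper exposes the raw handle through its get() method, unwrapping it once at the top of the function should resolve them. A minimal sketch of the adjusted function (same logic as above, only the unwrapping added):

std::vector<unsigned int> distribution(const MPICommWrapper mpi_comm, int number)
{
    // Unwrap the raw communicator once; the MPI C API expects MPI_Comm,
    // not the wrapper class.
    MPI_Comm comm = mpi_comm.get();

    int num_processes, this_process;
    MPI_Comm_size(comm, &num_processes);
    MPI_Comm_rank(comm, &this_process);

    std::vector<unsigned int> distribution(num_processes);
    for (int i = 0; i < num_processes; i++)
    {
        if (i == this_process)
            distribution[i] = number;
        MPI_Barrier(comm);
        MPI_Bcast(distribution.data(), static_cast<int>(distribution.size()),
                  MPI_UNSIGNED, i, comm);
    }
    return distribution;
}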