Is it possible to allocate an unnamed block of memory and then associate Fortran pointers with it?

My question arises from a user-defined Fortran data type. Here is an example:

type mytype
    integer, pointer :: a(:)
    real, pointer :: b(:)
end type 

type(mytype), pointer :: data(:)
! this MPI routine reserves a memory region
call mpi_win_allocate_shared(size,disp_unit,...,baseptr,win,ierr)

! is the following achievable? (baseptr + offset is schematic here:
! standard Fortran defines no arithmetic on a c_ptr)
do i = 1,n1
    call c_f_pointer(baseptr + offset_i, data)
    call c_f_pointer(baseptr + offset_ij, data(i)%a)
    call c_f_pointer(baseptr + offset_ik, data(i)%b)
enddo

OK, so you want to chop up a (shared) memory region and have different pointers pointing at different bits of it, with the different processes all able to read it. I have never done this before, so this is my first best guess. It assumes that every piece of data you want to store occupies a whole-number multiple of the number of bytes used to represent an integer:

ijb@ijb-Latitude-5410:~/work/stack$ mpif90 --version
GNU Fortran (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
Copyright (C) 2019 Free Software Foundation, Inc.
This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

ijb@ijb-Latitude-5410:~/work/stack$ mpif90 --showme:version
mpif90: Open MPI 4.0.3 (Language: Fortran)
ijb@ijb-Latitude-5410:~/work/stack$ cat shared2.f90
Program main 

  Use, Intrinsic :: iso_c_binding  , Only : c_ptr, c_loc, c_f_pointer, c_int, c_double
  Use, Intrinsic :: iso_fortran_env, Only : character_storage_size
  
  Use mpi_f08, Only : mpi_address_kind, mpi_comm_world, mpi_info, mpi_win, &
       mpi_info_null, mpi_proc_null, mpi_init, mpi_comm_size, mpi_comm_rank, &
       mpi_win_allocate_shared, mpi_win_shared_query, mpi_win_fence, &
       mpi_win_free, mpi_free_mem, mpi_finalize

  Implicit None ( Type, External )

  
  Integer, Parameter :: n_int   = 2 ! number of ints in the structure
  Integer, Parameter :: n_real  = 3 ! number of reals in the structures
  Integer, Parameter :: n_stuff = 4 ! number of elements in the array of structures

  Type :: my_struct
     Integer( c_int    ), Dimension( : ), Pointer :: value1
     Real   ( c_double ), Dimension( : ), Pointer :: value2
  End type my_struct
  
  Type( my_struct ), Dimension( : ), Allocatable :: stuff
  Type( mpi_info  ) :: info 
  Type( mpi_win   ) :: window
  Type( c_ptr     ) :: baseptr

  Integer( c_int ), Dimension( : ), Pointer :: shared_memory
  
  Integer( mpi_address_kind ) :: bytes

  Integer :: size_int, size_real
  Integer :: size_shared
  Integer :: disp_unit
  Integer :: w_rank, w_size, ierr
  Integer :: point
  Integer :: i
  
  Call mpi_init( ierr )
  Call mpi_comm_size( mpi_comm_world, w_size, ierr )
  Call mpi_comm_rank( mpi_comm_world, w_rank, ierr )

  Allocate( stuff( 1:n_stuff ) )
  
  ! Component sizes in bytes (a character storage unit is assumed to be one byte)
  size_int  = storage_size( stuff( 1 )%value1 ) / character_storage_size
  size_real = storage_size( stuff( 1 )%value2 ) / character_storage_size

  info = mpi_info_null
  disp_unit = 1
  ! Only rank 0 allocates the shared segment; the other ranks request zero bytes
  If( w_rank == 0 ) Then
     bytes = n_stuff * ( size_int * n_int + size_real * n_real )
  Else
     bytes = 0
  End If
  Call mpi_win_allocate_shared( bytes, disp_unit, info, mpi_comm_world, baseptr, window, ierr )
  ! With mpi_proc_null the query returns the segment of the lowest rank that
  ! allocated a non-zero size, so every rank gets the same base address and size
  Call mpi_win_shared_query( window, mpi_proc_null, bytes, disp_unit, baseptr, ierr )

  ! Overlay one big integer array on the whole shared segment ...
  size_shared = Int( bytes / size_int, Kind = Kind( size_shared ) )
  Call c_f_pointer( baseptr, shared_memory, [ size_shared ] )
  ! ... and carve it up: point indexes the first integer-sized slot of each
  ! component, and C_loc of that element supplies the C address to associate with
  point = 1
  Do i = 1, n_stuff
     Call c_f_pointer( C_loc( shared_memory( point ) ), stuff( i )%value1, [ n_int ] )
     point = point + n_int
     Call c_f_pointer( C_loc( shared_memory( point ) ), stuff( i )%value2, [ n_real ] )
     point = point + n_real * ( size_real / size_int )
  End Do

  ! Rank 0 writes through its pointers ...
  If( w_rank == 0 ) Then
     Do i = 1, Size( stuff )
        stuff( i )%value1 = 1 * i
        stuff( i )%value2 = 2.0_c_double * i
     End Do
  End If
  Call mpi_win_fence( 0, window, ierr )   ! make the writes visible to the other ranks

  ! ... and rank 2 reads the same data back through its own pointers
  If( w_rank == 2 ) Then
     Do i = 1, Size( stuff )
        Write( *, * ) stuff( i )%value1, stuff( i )%value2
     End Do
  End If

  Call mpi_win_free( window, ierr )

  Call mpi_finalize( ierr )
  
End Program main
ijb@ijb-Latitude-5410:~/work/stack$ mpif90 -Wall -Wextra -fcheck=all -O -g -std=f2018 shared2.f90
ijb@ijb-Latitude-5410:~/work/stack$ mpirun -np 3 ./a.out 
           1           1   2.0000000000000000        2.0000000000000000        2.0000000000000000     
           2           2   4.0000000000000000        4.0000000000000000        4.0000000000000000     
           3           3   6.0000000000000000        6.0000000000000000        6.0000000000000000     
           4           4   8.0000000000000000        8.0000000000000000        8.0000000000000000     
ijb@ijb-Latitude-5410:~/work/stack$ mpirun -np 3 ./a.out 
           1           1   2.0000000000000000        2.0000000000000000        2.0000000000000000     
           2           2   4.0000000000000000        4.0000000000000000        4.0000000000000000     
           3           3   6.0000000000000000        6.0000000000000000        6.0000000000000000     
           4           4   8.0000000000000000        8.0000000000000000        8.0000000000000000
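
The trick the answer relies on is that no arithmetic is ever performed on the c_ptr itself: the whole segment is first viewed as one big integer array, and C_loc of an element of that array supplies the shifted C address that c_f_pointer then associates with each component. The carving step can be exercised without MPI at all; what follows is a minimal sketch of that idea (the names carve_demo, buffer, slice_a and slice_b are illustrative and not part of the program above), with an ordinary Target array standing in for the shared window:

Program carve_demo

  Use, Intrinsic :: iso_c_binding, Only : c_ptr, c_loc, c_f_pointer, c_int, c_double

  Implicit None

  Integer, Parameter :: n_int  = 2   ! integers per structure, as in the answer
  Integer, Parameter :: n_real = 3   ! doubles per structure, as in the answer

  ! Stand-in for the shared segment: raw storage viewed as C ints.
  ! Each c_double occupies two c_int slots, hence n_int + 2 * n_real slots.
  Integer( c_int ), Dimension( 1:n_int + 2 * n_real ), Target :: buffer

  Integer( c_int    ), Dimension( : ), Pointer :: slice_a
  Real   ( c_double ), Dimension( : ), Pointer :: slice_b

  Type( c_ptr ) :: base

  base = c_loc( buffer )                                  ! plays the role of baseptr

  ! The first n_int integer slots hold the integers ...
  Call c_f_pointer( base, slice_a, [ n_int ] )
  ! ... and the slots after them hold the doubles; C_loc of the element at the
  ! offset yields "base + offset" without any pointer arithmetic
  Call c_f_pointer( C_loc( buffer( n_int + 1 ) ), slice_b, [ n_real ] )

  slice_a = [ 1, 2 ]
  slice_b = [ 1.0_c_double, 2.0_c_double, 3.0_c_double ]
  Write( *, * ) slice_a
  Write( *, * ) slice_b

End Program carve_demo

As in the MPI program, this works because every component starts a whole number of integer-sized slots into the block, which is the assumption stated at the top of the answer; it also quietly assumes that such offsets give acceptable alignment for c_double, which holds on common platforms.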