Having multiple processes read data from the same file


I have a mesh file that I partitioned with METIS into 4 parts (one per process). METIS gave me the partition file for the mesh: a file that lists, for each element of the mesh, the rank of the process it belongs to. My job now is to feed this information into my parallel code, and I am trying to do that by letting every process open the same mesh file and read only the data it needs, according to the partition file.

#include <iostream>
#include <fstream>
#include <sstream>
#include "mpi.h"
using namespace std;
//each process stores the partitioning
int* PartitionFile(){
 ifstream file ("epart.txt");
 const int NE=14;//total number of elements in the mesh
 int part,i=0;
 int *partition=new int[NE];
 while(i<NE && file>>part){//bound the loop so extra lines cannot overrun the array
   partition[i]=part;
   i++;
 }
 file.close();
 return partition;
}
int FindSizeOfLocalElements(int *epart,int rank){
  int size=0;
  for (int i=0;i<14;i++){
   if(epart[i]==rank){
    size+=1;
   }
  }
  return size;
}
//stores the elements of each process
int * LocalPartition(int* epart,int size,int rank){

  int *localPart=new int[size];
  int j=0;
 for(int i=0;i<14;i++){
    if (epart[i]==rank){
        localPart[j]=i+1;//+1 because elements start from 1(global numbering)
        j+=1;
    }
  }
return localPart;
}
int **ElementConnectivityMeshFile(int* localPart,int size){
  ifstream file ("mesh.txt");
  int node1,node2,node3;
  int elem=1;
  int i=0;
  int **elemConn=new int*[size];
  for(int j=0;j<size;j++){
    elemConn[j]=new int[3];//each element has 3 nodes; elements here use local numbering, the global numbering is stored in localPart
 }
  while(file>>node1>>node2>>node3){
    if (i<size && elem==localPart[i]){//i<size prevents reading past the end of localPart
        elemConn[i][0]=node1;
        elemConn[i][1]=node2;
        elemConn[i][2]=node3;
        i+=1;
    }
    elem+=1;
    if(elem>14){break;}
  }
  file.close();
  return elemConn;
}

int main(){
  MPI_Init(NULL, NULL);
  int numProc;
  MPI_Comm_size(MPI_COMM_WORLD, &numProc);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  int *epart=PartitionFile();
  int size=FindSizeOfLocalElements(epart,rank);
  int *elem=LocalPartition(epart,size,rank);
  int **elemConn=ElementConnectivityMeshFile(elem,size);
  MPI_Finalize();
  return 0;
}

This part of the code gives me the results I want, but I would like to know how efficient it is to have MPI processes read the same file through the C++ standard library, and whether this hurts scalability and performance. (For comparison, a rank-0 read-and-broadcast sketch follows the data files below.)

mesh file
1 3 2
2 3 4
3 5 4
4 5 6
5 7 6
8 7 5
3 8 5
9 7 8
9 8 3
1 9 3
10 9 1
11 10 1
11 1 12
12 1 2

epart file
2
2
0
0
0
1
0
1
1
3
3
3
2
2
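
For comparison, the usual alternative to every rank opening epart.txt is to have a single rank read it and broadcast the array, so only one process touches the file system. A minimal sketch of that variant (my own illustration, assuming the same 14-element epart.txt as above):

#include <fstream>
#include <vector>
#include "mpi.h"
using namespace std;

int main(){
  MPI_Init(NULL, NULL);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int NE=14;//total number of elements, as above
  vector<int> epart(NE);
  if(rank==0){
    //only rank 0 touches the file system
    ifstream file("epart.txt");
    for(int i=0;i<NE;i++){file>>epart[i];}
  }
  //every other rank receives the partition array over the network
  MPI_Bcast(epart.data(), NE, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}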
1 Answer

I think this program illustrates the basic approach of using MPI-IO with binary files.

#include <stdio.h>
#include <mpi.h>

#define NELEM 14
#define NVERT  3

int meshfile[NELEM][NVERT] =
  { { 1,  3,  2},
    { 2,  3,  4},
    { 3,  5,  4},
    { 4,  5,  6},
    { 5,  7,  6},
    { 8,  7,  5},
    { 3,  8,  5},
    { 9,  7,  8},
    { 9,  8,  3},
    { 1,  9,  3},
    {10,  9,  1},
    {11, 10,  1},
    {11,  1, 12},
    {12,  1,  2}, };

int partfile[NELEM] = {2, 2, 0, 0, 0, 1, 0, 1, 1, 3, 3, 3, 2, 2};

int main(void)
{
  int i;

  int part[NELEM];
  int mesh[NELEM][NVERT];

  /* Should really malloc smaller mesh based on local size */

  FILE *fp;

  int rank, size;
  MPI_Comm comm;
  MPI_Status status;
  MPI_File fh;
  MPI_Datatype filetype;
  int disp[NELEM];
  int nelemlocal;

  /* Should really malloc smaller displ based on nelemlocal */

  comm = MPI_COMM_WORLD;

  MPI_Init(NULL, NULL);

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if (rank == 0)
    {
      printf("Running on %d processes\n", size);

      // data files should already exist but create them here so we
      // have a self-contained program

      fp = fopen("mesh.dat", "w");
      fwrite(meshfile, sizeof(int), NELEM*NVERT, fp);
      fclose(fp);

      fp = fopen("part.dat", "w");
      fwrite(partfile, sizeof(int), NELEM, fp);
      fclose(fp);
    }

  /* make sure the data files exist on disk before any rank opens them */
  MPI_Barrier(comm);

  // we could read on rank 0 and broadcast, but with MPI-IO the
  // collective "read_all" should take an efficient approach;
  // here every rank reads the whole partition file

  MPI_File_open(comm, "part.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
  MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
  MPI_File_read_all(fh, part, NELEM, MPI_INT, &status);
  MPI_File_close(&fh);

  nelemlocal = 0;

  // pick out local elements and record displacements

  for (i=0; i < NELEM; i++)
    {
      if (part[i] == rank)
        {
          disp[nelemlocal] = i*NVERT;
          nelemlocal += 1;
        }
    }

  printf("on rank %d, nelemlocal = %d\n", rank, nelemlocal);

  // create the MPI datatype to use as the filetype, which is
  // effectively a mask that selects only the elements for this rank

  MPI_Type_create_indexed_block(nelemlocal, NVERT, disp, MPI_INT, &filetype);
  MPI_Type_commit(&filetype);

  MPI_File_open(comm, "mesh.dat", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);

  // set the file view appropriate to this rank
  MPI_File_set_view(fh, 0, MPI_INT, filetype, "native", MPI_INFO_NULL);

  // each rank only reads its own set of elements based on file view
  MPI_File_read_all(fh, mesh, nelemlocal*NVERT, MPI_INT, &status);

  MPI_File_close(&fh);

  // check we got the correct data

  for (i=0; i < nelemlocal; i++)
    {
      printf("on rank %d, mesh[%d] = %d, %d, %d\n",
         rank, i, mesh[i][0], mesh[i][1], mesh[i][2]);
    }

  MPI_Finalize();

  return 0;
}
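
To see how the file view acts as a mask, take rank 0: part[i] == 0 holds for elements 2, 3, 4 and 6 (zero-based), so disp becomes {6, 9, 12, 18} (displacements in units of MPI_INT), and the view exposes exactly those four 3-int blocks of mesh.dat; the collective read then delivers them contiguously as mesh[0] through mesh[3].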

And it seems to give the correct answer:

dsh@laptop$ mpicc -o metisio metisio.c
dsh@laptop$ mpirun -n 4 ./metisio | sort
on rank 0, mesh[0] = 3, 5, 4
on rank 0, mesh[1] = 4, 5, 6
on rank 0, mesh[2] = 5, 7, 6
on rank 0, mesh[3] = 3, 8, 5
on rank 0, nelemlocal = 4
on rank 1, mesh[0] = 8, 7, 5
on rank 1, mesh[1] = 9, 7, 8
on rank 1, mesh[2] = 9, 8, 3
on rank 1, nelemlocal = 3
on rank 2, mesh[0] = 1, 3, 2
on rank 2, mesh[1] = 2, 3, 4
on rank 2, mesh[2] = 11, 1, 12
on rank 2, mesh[3] = 12, 1, 2
on rank 2, nelemlocal = 4
on rank 3, mesh[0] = 1, 9, 3
on rank 3, mesh[1] = 10, 9, 1
on rank 3, mesh[2] = 11, 10, 1
on rank 3, nelemlocal = 3
Running on 4 processes
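
One practical wrinkle: the original mesh and epart files are plain text, while the reader above expects the binary layout of mesh.dat and part.dat (NELEM*NVERT ints, then NELEM ints). A minimal one-off converter, assuming the whitespace-separated format shown in the question:

#include <cstdio>
#include <fstream>
using namespace std;

int main(){
  ifstream in("mesh.txt");
  FILE *out=fopen("mesh.dat","wb");
  int node;
  while(in>>node){//stream every integer straight into the binary file
    fwrite(&node,sizeof(int),1,out);
  }
  fclose(out);
  return 0;
}

Running the same loop over epart.txt produces part.dat.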