00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028 #include "mdparallel.h"
00029
00030
00031
00032
00033
00034
00035 #ifdef _PARALLEL
00036
00037
00038
00039 void MDPARALLELFrame::initvars()
00040 {
00041 MDFrame::initvars();
00042
00043 INFO_Printf("[%d] numDomains = %d alloc requests and status\n",domainID,numDomains);
00044 inRequests = (MPI_Request *) malloc(numDomains*sizeof(MPI_Request));
00045 outRequests = (MPI_Request *) malloc(numDomains*sizeof(MPI_Request));
00046 inStatus = (MPI_Status *) malloc(numDomains*sizeof(MPI_Status ));
00047 outStatus = (MPI_Status *) malloc(numDomains*sizeof(MPI_Status));
00048 }
00049
void MDPARALLELFrame::initparser()
{
    /* Register the parallel-specific variables with the script parser,
     * on top of the serial MDFrame bindings.  Each bindvar call exposes
     * a member variable under the given script name. */
    MDFrame::initparser();

    /* Cartesian position of this domain in the processor grid. */
    bindvar("myIX",&myIX,INT);
    bindvar("myIY",&myIY,INT);
    bindvar("myIZ",&myIZ,INT);
    /* Processor-grid dimensions (nXdoms*nYdoms*nZdoms == numDomains). */
    bindvar("nXdoms",&nXdoms,INT);
    bindvar("nYdoms",&nYdoms,INT);
    bindvar("nZdoms",&nZdoms,INT);
    /* MPI rank and total number of ranks. */
    bindvar("myDomain",&myDomain,INT);
    bindvar("numDomains",&numDomains,INT);
    /* Spatial bounds of this domain (scaled coordinates). */
    bindvar("myXmin",&myXmin,DOUBLE);
    bindvar("myYmin",&myYmin,DOUBLE);
    bindvar("myZmin",&myZmin,DOUBLE);
    bindvar("myXmax",&myXmax,DOUBLE);
    bindvar("myYmax",&myYmax,DOUBLE);
    bindvar("myZmax",&myZmax,DOUBLE);

}
00070
00071 int MDPARALLELFrame::exec(char *name)
00072 {
00073 if(MDFrame::exec(name)==0) return 0;
00074
00075 bindcommand(name,"Master_to_Slave",Master_to_Slave(command));
00076 bindcommand(name,"Broadcast_Atoms",{if(myDomain==0) Master_to_Slave("Broadcast_Atoms"); Broadcast_Atoms();});
00077 bindcommand(name,"Slave_to_Master_Atoms",{if(myDomain==0) Master_to_Slave("Broadcast_Atoms");Slave_to_Master_Atoms();});
00078 bindcommand(name,"Slave_chdir",{if(myDomain==0) Master_to_Slave("Slave_chdir");Slave_chdir();});
00079 bindcommand(name,"Partition_Domains",{if(myDomain==0) Master_to_Slave("Partition_Domains");Partition_Domains();});
00080 bindcommand(name,"Mark_Local_Atoms",{if(myDomain==0) Master_to_Slave("Mark_Local_Atoms");Mark_Local_Atoms();});
00081 bindcommand(name,"eval_parallel",{if(myDomain==0) Master_to_Slave("eval_parallel");eval_parallel();});
00082 bindcommand(name,"run_parallel",{if(myDomain==0) Master_to_Slave("run_parallel");run_parallel();});
00083 bindcommand(name,"alloc_all",alloc_all());
00084 bindcommand(name,"quit_all",{if(myDomain==0) Master_to_Slave("quit");quit_all();});
00085 bindcommand(name,"nebrelax_parallel",{if(myDomain==0) Master_to_Slave("nebrelax_parallel");nebrelax_parallel();});
00086 bindcommand(name,"allocchain_parallel",{if(myDomain==0) Master_to_Slave("allocchain_parallel");AllocChain_parallel();});
00087 bindcommand(name,"initRchain_parallel",{if(myDomain==0) Master_to_Slave("initRchain_parallel");initRchain_parallel();});
00088 bindcommand(name,"readRchain_parallel",{if(myDomain==0) Master_to_Slave("readRchain_parallel");readRchain_parallel();});
00089 bindcommand(name,"writeRchain_parallel",{if(myDomain==0) Master_to_Slave("writeRchain_parallel");writeRchain_parallel();});
00090 bindcommand(name,"copyRchaintoCN_parallel",{if(myDomain==0) Master_to_Slave("copyRchaintoCN_parallel");copyRchaintoCN_parallel();});
00091 bindcommand(name,"writefinalcnfile_parallel",{if(myDomain==0) Master_to_Slave("writefinalcnfile_parallel");writefinalcnfile_parallel(1,false);});
00092 return -1;
00093 }
00094
00095
void MDPARALLELFrame::Alloc()
{
    /* (Re)allocate per-atom arrays.  The serial arrays are handled by
     * MDFrame::Alloc(); here we add the parallel-only arrays:
     *  - domainID[i]          : owning domain of atom i
     *  - *_global arrays      : rank-0 receive buffers for MPI_Reduce
     *                           in Master_Collect_Results().
     * allocmultiple provides slack so the arrays need not be regrown
     * on every small change of _NP. */
    int size;
    size=_NP*allocmultiple;

    MDFrame::Alloc();

    Realloc(domainID,int,size);
    Realloc(_EPOT_IND_global,double,size);
    Realloc(_F_global,Vector3,size);
    Realloc(_VIRIAL_IND_global,Matrix33,size);

    /* re-bind after Realloc: the array may have moved. */
    bindvar("domainID",domainID,INT);
}
00110
00111 void MDPARALLELFrame::ParallelInit(int *pargc, char ***pargv)
00112 {
00113 MPI_Init(pargc,pargv);
00114 MPI_Comm_rank(MPI_COMM_WORLD, &myDomain);
00115 MPI_Comm_size(MPI_COMM_WORLD, &numDomains);
00116
00117 if(myDomain==0)
00118 {
00119 INFO_Printf("[%d]: I am the master in charge of %d processors.\n",
00120 myDomain, numDomains);
00121 }
00122 else
00123 {
00124 INFO_Printf("[%d]: Processor %d at your service\n",
00125 myDomain, myDomain);
00126 }
00127 }
00128
void MDPARALLELFrame::WaitForCommand()
{
    /* Slave main loop: block on MPI_Bcast until the master sends the next
     * command string, then feed that string to the script parser.
     *
     * The parser reads from a FILE*, so each iteration creates a pipe,
     * writes the received command into its write end, closes it, and hands
     * the read end to parse_line().  Both FILE streams are closed each
     * iteration, which also closes the underlying pipe fds.
     * This function never returns normally (infinite loop); it exits only
     * if pipe() fails, or when a received command terminates the process
     * (e.g. "quit" handled inside parse_line/exec). */
    FILE *istream, *ostream; int mypipe[2];

    INFO_Printf("[%d]: I am waiting for master's command.\n",myDomain);

    while(1)
    {
        if (pipe (mypipe))
        {
            fprintf (stderr, "[%d]: Pipe failed.\n",myDomain);
            return;
        }
        istream = fdopen(mypipe[0], "r");
        ostream = fdopen(mypipe[1], "w");
        /* matches the MPI_Bcast issued by the master in Master_to_Slave() */
        MPI_Bcast(command,MAXCMDLEN,MPI_CHAR,0,MPI_COMM_WORLD);
        INFO_Printf("[%d]: I receive command: %s\n",myDomain,command);
        fprintf(ostream,"%s \n",command);
        fprintf(ostream,"\n");
        fclose(ostream);
        parse_line(istream);
        fclose(istream);
    }
}
00153
00154 void MDPARALLELFrame::Master_to_Slave(char *cmd)
00155 {
00156 if(command!=cmd) strcpy(command,cmd);
00157 MPI_Bcast(command,MAXCMDLEN,MPI_CHAR,0,MPI_COMM_WORLD);
00158 }
00159
void MDPARALLELFrame::alloc_all()
{
    /* Make every slave allocate atom arrays for the master's _NP:
     * broadcast the script assignment "NP = <n>", which each slave
     * executes through its parser (WaitForCommand -> parse_line). */
    sprintf(command,"NP = %d",_NP);
    Master_to_Slave(command);
}
00166
void MDPARALLELFrame::quit_all()
{
    /* Release parallel-only resources and terminate via the serial quit().
     * domBound* are only allocated in the multi-domain case, hence the
     * guard.  NOTE(review): other buffers allocated elsewhere (e.g.
     * neighDoms, inRequests/outRequests) are not freed here -- presumably
     * acceptable at process exit; confirm if quit() can return. */
    if(numDomains>1)
    {
        free(domBoundX);
        free(domBoundY);
        free(domBoundZ);
    }

    quit();
}
00181
void MDPARALLELFrame::Partition_Domains()
{
    /* Set up the nXdoms x nYdoms x nZdoms processor grid:
     *  1. broadcast the grid dimensions from the master,
     *  2. derive this rank's grid coordinates (myIX,myIY,myIZ),
     *  3. build the list of neighboring domains (neighDoms) from the
     *     3x3x3 stencil around this domain, with periodic wrap-around.
     * Collective: every rank must call this together (MPI_Bcast). */
    Vector3 h;
    int i, j, k, i2, j2, k2, ndom, *flag;

    MPI_Bcast(&nXdoms,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&nYdoms,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&nZdoms,1,MPI_INT,0,MPI_COMM_WORLD);

    if(myDomain==0)
    {
        INFO_Printf("nXdoms = %d nYdoms = %d nZdoms = %d numDomains = %d\n",
                    nXdoms,nYdoms,nZdoms,numDomains);
        if(nXdoms*nYdoms*nZdoms!=numDomains)
            FATAL("product of nXdoms nYdoms nZdoms ("<<
                  nXdoms*nYdoms*nZdoms<<" does not match numDomains");
    }

    /* rank -> grid coordinates; inverse of ndom = (ix*nYdoms+iy)*nZdoms+iz */
    myIZ = myDomain % nZdoms;
    myIY = ((myDomain-myIZ)/nZdoms) % nYdoms;
    myIX = ((myDomain-myIZ)/nZdoms - myIY) / nYdoms;

    if(myDomain == 0)
    {
        /* each domain slab must be at least one neighbor-list radius wide,
         * otherwise the single-shell skin exchange is insufficient. */
        h=_H.height();
        if ((h[0]/nXdoms) < _RLIST) FATAL("nXdoms ("<<nXdoms<<" too large, RLIST="<<_RLIST);
        if ((h[1]/nYdoms) < _RLIST) FATAL("nYdoms ("<<nYdoms<<" too large, RLIST="<<_RLIST);
        if ((h[2]/nZdoms) < _RLIST) FATAL("nZdoms ("<<nZdoms<<" too large, RLIST="<<_RLIST);
    }

    /* mark the (up to 26) distinct domains in the periodic 3x3x3 stencil */
    flag = (int *) malloc(numDomains*sizeof(int));
    memset(flag,0,numDomains*sizeof(int));
    for(i=-1;i<=1;i++) for(j=-1;j<=1;j++) for(k=-1;k<=1;k++)
    {
        i2=(myIX+i+nXdoms) % nXdoms;
        j2=(myIY+j+nYdoms) % nYdoms;
        k2=(myIZ+k+nZdoms) % nZdoms;

        ndom = (i2*nYdoms + j2)*nZdoms + k2;
        flag[ndom] = 1;
    }
    /* compact flags into the neighbor list, excluding ourselves.
     * neighDoms is sized for the worst case (all domains). */
    neighDoms = (int *) malloc(numDomains*sizeof(int));
    numNeighDoms = 0;
    for(i=0;i<numDomains;i++)
    {
        if(i==myDomain) continue;
        if(flag[i]==1)
        {
            neighDoms[numNeighDoms] = i;
            numNeighDoms++;
        }
    }
    INFO_Printf("[%d]: %d neighbor domains (",myDomain,numNeighDoms);
    for(i=0;i<numNeighDoms;i++)
        INFO_Printf("%d ",neighDoms[i]);
    INFO_Printf(")\n");

    free(flag);
}
00245
00246 void MDPARALLELFrame::Broadcast_Atoms()
00247 {
00248 int NP_old;
00249
00250
00251
00252
00253
00254 NP_old = _NP;
00255 MPI_Bcast(&_NP,1,MPI_INT,0,MPI_COMM_WORLD);
00256 if(_NP!=NP_old) Alloc();
00257
00258
00259 MPI_Bcast(&(_H[0][0]),9,MPI_DOUBLE,0,MPI_COMM_WORLD);
00260
00261
00262 MPI_Bcast(_SR, _NP*3,MPI_DOUBLE,0,MPI_COMM_WORLD);
00263 MPI_Bcast(_VSR,_NP*3,MPI_DOUBLE,0,MPI_COMM_WORLD);
00264
00265
00266 MPI_Bcast(fixed, _NP,MPI_INT,0,MPI_COMM_WORLD);
00267 MPI_Bcast(species,_NP,MPI_INT,0,MPI_COMM_WORLD);
00268 MPI_Bcast(group, _NP,MPI_INT,0,MPI_COMM_WORLD);
00269
00270
00271 MPI_Bcast(&_NNM, 1,MPI_DOUBLE,0,MPI_COMM_WORLD);
00272 MPI_Bcast(&_NIC, 1,MPI_DOUBLE,0,MPI_COMM_WORLD);
00273 }
00274
void MDPARALLELFrame::Slave_to_Master_Atoms()
{
    /* Gather the positions/velocities of all locally-owned atoms from every
     * slave back to the master, which writes them into its own _SR/_VSR.
     * Collective: all ranks must enter together.  Protocol, in phases:
     *   1. each slave counts its owned atoms (domainID[i]==myDomain,
     *      skipping skin atoms marked fixed==-1) and sends the count;
     *   2. each slave packs [index, SR.xyz, VSR.xyz] (7 doubles/atom) and
     *      sends the payload; the master receives one buffer per slave;
     *   3. the master unpacks by global atom index.
     * All transfers are nonblocking Isend/Irecv completed by Waitall. */
    int i, nsend, n, idom, *nrecv;
    double *outBuf, **inBuf;

    MPI_Barrier(MPI_COMM_WORLD);

    /* --- phase 1: exchange per-slave atom counts --- */
    if(myDomain!=0)
    {
        nsend = 0;
        for(i=0;i<_NP;i++)
        {
            if(fixed[i]==-1) continue;       /* skin copy, not owned here */
            if(domainID[i]==myDomain)
            {
                nsend++;
            }
        }
        MPI_Isend(&nsend,1,MPI_INT,0,MSG_ATOM_LEN,MPI_COMM_WORLD,&outRequests[0]);

    }
    else
    {
        /* nrecv[idom] = number of atoms owned by rank idom (idom>=1) */
        nrecv = (int *)malloc(numDomains*sizeof(int));
        for(idom=1;idom<numDomains;idom++)
            MPI_Irecv(&nrecv[idom],1,MPI_INT,idom,MSG_ATOM_LEN,MPI_COMM_WORLD,&inRequests[idom]);

    }
    if(myDomain!=0)
        MPI_Waitall(1, outRequests, outStatus);
    else
        MPI_Waitall(numDomains-1, inRequests+1, inStatus+1);

    /* --- phase 2: exchange packed atom data (7 doubles per atom) --- */
    if(myDomain!=0)
    {
        outBuf = (double *) malloc(nsend*sizeof(double)*7);

        n = 0;
        for(i=0;i<_NP;i++)
        {
            if(fixed[i]==-1) continue;
            if(domainID[i]==myDomain)
            {
                /* global index travels as a double and is cast back on rank 0 */
                outBuf[n*7+0] = (double) i;
                outBuf[n*7+1] = _SR[i].x;
                outBuf[n*7+2] = _SR[i].y;
                outBuf[n*7+3] = _SR[i].z;
                outBuf[n*7+4] = _VSR[i].x;
                outBuf[n*7+5] = _VSR[i].y;
                outBuf[n*7+6] = _VSR[i].z;
                n++;
            }
        }
        MPI_Isend(outBuf,nsend*7,MPI_DOUBLE,0,MSG_ATOM,MPI_COMM_WORLD,&outRequests[0]);
    }
    else
    {
        inBuf = (double **) malloc(numDomains*sizeof(double *));
        for(idom=1;idom<numDomains;idom++)
        {
            inBuf[idom] = (double *) malloc(nrecv[idom]*sizeof(double)*7);

        }
        for(idom=1;idom<numDomains;idom++)
            MPI_Irecv(inBuf[idom],nrecv[idom]*7,MPI_DOUBLE,idom,MSG_ATOM,MPI_COMM_WORLD,&inRequests[idom]);
    }

    if(myDomain!=0)
    {

        MPI_Waitall(1, outRequests, outStatus);

    }
    else
    {

        MPI_Waitall(numDomains-1, inRequests+1, inStatus+1);

    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* --- phase 3: master unpacks the received atoms by global index --- */
    if(myDomain==0)
    {
        for(idom=1;idom<numDomains;idom++)
        {
            for(n=0;n<nrecv[idom];n++)
            {
                i = (int) inBuf[idom][n*7+0];
                _SR[i].x = inBuf[idom][n*7+1];
                _SR[i].y = inBuf[idom][n*7+2];
                _SR[i].z = inBuf[idom][n*7+3];
                _VSR[i].x = inBuf[idom][n*7+4];
                _VSR[i].y = inBuf[idom][n*7+5];
                _VSR[i].z = inBuf[idom][n*7+6];
            }
        }
    }

    /* --- cleanup (each side frees only what it allocated) --- */
    if(myDomain!=0)
    {
        free(outBuf);
    }
    else
    {
        for(i=1;i<numDomains;i++) free(inBuf[i]);
        free(inBuf);
        free(nrecv);
    }

}
00395
void MDPARALLELFrame::Slave_chdir()
{
    /* Broadcast the master's working-directory name and make every slave
     * chdir into it (after expanding a leading "~" via SubHomeDir).
     * The master itself does not chdir here -- presumably it already did
     * when the script command was executed; confirm against caller.
     * Collective: all ranks must call this together. */
    char extname[200];

    MPI_Bcast(dirname,1000,MPI_CHAR,0,MPI_COMM_WORLD);
    if(myDomain!=0)
    {
        LFile::SubHomeDir(dirname, extname);
        if(chdir(extname)!=0)
        {
            ERROR("cd "<<dirname<<" failed");
        }
    }
}
00410
void MDPARALLELFrame::potential_parallel()
{
    /* Evaluate the interatomic potential on this domain's atoms.
     * In the multi-domain case:
     *   - first refresh skin atoms from neighboring domains and re-mark
     *     ownership (atoms may have migrated),
     *   - then zero out the contributions attributed to non-owned atoms,
     *     so that the later MPI_Reduce in Master_Collect_Results() sums
     *     each atom's contribution exactly once. */
    int i;

    if(numDomains>1)
    {
        Comm_Neighbor_Domains_Atoms();
        Mark_Local_Atoms();
    }
    potential();

    if(numDomains>1)
    {
        _EPOT=0;
        for(i=0;i<_NP;i++)
        {
            if(domainID[i]!=myDomain)
            {
                _EPOT_IND[i]=0;
                _F[i].clear();
                _VIRIAL_IND[i].clear();
            }
        }
    }
}
00438
00439
void MDPARALLELFrame::Master_Collect_Results()
{
    /* Sum per-atom energies, forces and virials across all domains onto
     * rank 0, then accumulate the totals (_EPOT, _VIRIAL) there.
     * Collective: all ranks must call this together.
     * NOTE(review): MPI_Reduce defines the *_global buffers only on the
     * root; the memmove below also runs on non-root ranks and copies
     * undefined data into their _EPOT_IND/_F/_VIRIAL_IND.  Harmless if
     * slaves recompute before next use (potential_parallel does), but
     * consider guarding the copies with myDomain==0. */
    int i;

    /* Vector3/Matrix33 arrays are reduced as raw doubles (3 and 9 per atom) */
    MPI_Reduce(_EPOT_IND,_EPOT_IND_global,_NP,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
    MPI_Reduce(_F,_F_global,_NP*3,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
    MPI_Reduce(_VIRIAL_IND,_VIRIAL_IND_global,_NP*9,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);

    memmove(_EPOT_IND,_EPOT_IND_global,_NP*sizeof(double));
    memmove(_F,_F_global,_NP*3*sizeof(double));
    memmove(_VIRIAL_IND,_VIRIAL_IND_global,_NP*9*sizeof(double));

    if(myDomain==0)
    {
        _EPOT=0; _VIRIAL.clear();
        for(i=0;i<_NP;i++)
        {
            _EPOT+=_EPOT_IND[i];
            _VIRIAL+=_VIRIAL_IND[i];
        }
    }
}
00462
void MDPARALLELFrame::eval_parallel()
{
    /* One-shot parallel energy/force evaluation:
     * mark atom ownership, compute the potential on each domain, reduce
     * the results onto the master, and let the master compute and print
     * the derived properties.  Collective: all ranks call this together. */
    Mark_Local_Atoms();

    potential_parallel();

    Master_Collect_Results();

    if(myDomain==0)
    {
        calcprop();
        printResult();
    }
}
00480
void MDPARALLELFrame::run_parallel()
{
    /* Parallel MD driver.  Collective: all ranks enter together.
     * Steps:
     *   1. map (ensemble_type, integrator_type, implementation_type) to a
     *      numeric algorithm_id consumed by step_parallel(),
     *   2. for NPH/NPT, the master precomputes the Parrinello-Rahman
     *      stress matrix _SIGMA from the target stress and _H0,
     *   3. broadcast run parameters to all ranks (doubles/strings
     *      individually, the ints packed into spec[]),
     *   4. main loop: integrate, and periodically gather atoms to the
     *      master for property/configuration output. */
    int itmp;
    double pres, omega0;
    class Matrix33 prp, h0inv, h0invtran;
    int nspec, *spec;

    /* ensemble -> ten-thousands digit of algorithm_id */
    if (strcmp(ensemble_type,"NVE")==0)
        algorithm_id = 10000;
    else if (strcmp(ensemble_type,"NVT")==0)
        algorithm_id = 20000;
    else if (strcmp(ensemble_type,"NPH")==0)
        algorithm_id = 30000;
    else if (strcmp(ensemble_type,"NPT")==0)
        algorithm_id = 40000;
    else
    {
        algorithm_id = 100;
        ERROR("unknown ensemble_type("<<ensemble_type<<" use NVE\n"
              "choices: NVE, NVT, NPH, NPT");
    }
    /* integrator -> hundreds digit.  NOTE(review): this error message
     * mentions ensemble_type but the test is on integrator_type. */
    if (strcmp(integrator_type,"Gear6")==0)
        algorithm_id += 0;
    else if (strcmp(integrator_type,"VVerlet")==0)
        algorithm_id += 100;
    else
    {
        ERROR("unknown ensemble_type("<<ensemble_type<<" use Gear6\n"
              "choices: Gear6 VVerlet");
    }

    algorithm_id += implementation_type;
    INFO("algorithm_id = "<<algorithm_id);

    itmp = algorithm_id / 10000;    /* 1=NVE 2=NVT 3=NPH 4=NPT */

    SHtoR();
    Mark_Local_Atoms();

    /* Parrinello-Rahman setup for constant-stress ensembles (NPH/NPT) */
    if((itmp==3)||(itmp==4))
    {
        if(myDomain==0)
        {
            /* split external stress into hydrostatic and deviatoric parts */
            pres=_EXTSTRESS.trace()/3;
            prp=_EXTSTRESS;
            prp[0][0]-=pres;
            prp[1][1]-=pres;
            prp[2][2]-=pres;
            prp*=_EXTSTRESSMUL;
            pres*=_EXTSTRESSMUL;
            pres+=_EXTPRESSADD;

            if(_H0.det()<1e-10)
                FATAL("you need to specify H0 by calling saveH "
                      "before using the Parrinello-Rahman method");

            h0inv=_H0.inv();
            h0invtran=h0inv.tran();
            omega0=_H0.det()*(1-_VACUUMRATIO);
            _SIGMA=((h0inv*prp)*h0invtran)*omega0;
            _SIGMA/=160.2e3;    /* unit conversion (MPa -> internal units) */
        }
    }

    /* broadcast double/string run parameters */
    MPI_Bcast(&_SIGMA[0][0],9,MPI_DOUBLE,0,MPI_COMM_WORLD);
    MPI_Bcast(&_ATOMMASS,MAXSPECIES,MPI_DOUBLE,0,MPI_COMM_WORLD);
    MPI_Bcast(&vt2,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
    MPI_Bcast(&_TIMESTEP,1,MPI_DOUBLE,0,MPI_COMM_WORLD);
    MPI_Bcast(ensemble_type,100,MPI_CHAR,0,MPI_COMM_WORLD);
    MPI_Bcast(integrator_type,100,MPI_CHAR,0,MPI_COMM_WORLD);

    /* pack the integer parameters into one buffer for a single broadcast.
     * NOTE(review): spec[2]/spec[3] are unused, and both spec[4] and
     * spec[8] carry savecn (slaves assign it twice) -- possibly one of
     * them was meant to be another flag; confirm against serial run(). */
    nspec = 16;
    spec = (int *) malloc(nspec*sizeof(int));
    if(myDomain==0)
    {
        spec[0] = totalsteps;
        spec[1] = equilsteps;
        spec[2] = 0;
        spec[3] = 0;
        spec[4] = savecn;
        spec[5] = savecnfreq;
        spec[6] = saveprop;
        spec[7] = savepropfreq;
        spec[8] = savecn;
        spec[9] = printfreq;
        spec[10] = plotfreq;
        spec[11] = autowritegiffreq;
        spec[12] = conj_fixbox;
        spec[13] = algorithm_id;
        spec[14] = _NNM;
        spec[15] = _NIC;
    }
    MPI_Bcast(spec, nspec, MPI_INT, 0,MPI_COMM_WORLD);
    if(myDomain!=0)
    {
        totalsteps = spec[0];
        equilsteps = spec[1];

        savecn = spec[4];
        savecnfreq = spec[5];
        saveprop = spec[6];
        savepropfreq = spec[7];
        savecn = spec[8];
        printfreq = spec[9];
        plotfreq = spec[10];
        autowritegiffreq = spec[11];
        conj_fixbox = spec[12];
        algorithm_id = spec[13];
        _NNM = spec[14];
        _NIC = spec[15];
    }
    free(spec);

    /* ---------------- main MD loop ---------------- */
    for(curstep=0;curstep<totalsteps;curstep++)
    {
        /* honor the visualization window's pause button */
        while(win!=NULL)
        {
            if(win->IsPaused()) sleep(1);
            else break;
        }

        if(curstep%savepropfreq==0)
            INFO("["<<myDomain<<"]: curstep="<<curstep<<" EPOT="<<_EPOT);

        winplot();

        /* periodic output (only after equilibration) */
        if(curstep>=equilsteps)
        {
            if(saveprop)
            {
                if((curstep%savepropfreq)==0)
                {
                    /* gather atoms to master, evaluate, write properties */
                    Slave_to_Master_Atoms();

                    potential_parallel();
                    Master_Collect_Results();
                    if(myDomain==0)
                    {
                        calcprop();
                        calcoutput();
                        pf.write(this);
                    }
                    /* re-evaluate so local forces are consistent again
                     * before the next integration step */
                    potential_parallel();
                }
            }
            if(savecn)
                if((curstep%savecnfreq)==0&&(curstep!=0))
                {
                    Slave_to_Master_Atoms();
                    if(myDomain==0)
                    {
                        intercn.write(this,zipfiles,true);
                    }
                }
        }

        /* advance one time step with the selected integrator */
        step_parallel();

        if(win!=NULL)
            if(win->IsAlive())
                if(autowritegiffreq>0)
                    if((curstep%autowritegiffreq)==0)
                    {
                        win->LockWritegif();
                    }
    }

    /* final gather and property output */
    Slave_to_Master_Atoms();
    if(saveprop)
    {
        potential_parallel();
        Master_Collect_Results();
        if(myDomain==0)
        {
            calcprop();
            pf.write(this);
        }
    }
}
00685
void MDPARALLELFrame::step_parallel()
{
    /* Advance the system one time step using the integrator selected by
     * algorithm_id (set in run_parallel):
     *   ten-thousands digit = ensemble (1 NVE, 2 NVT, 3 NPH, 4 NPT),
     *   hundreds digit      = integrator (0 Gear6, 1 VVerlet),
     *   units digit         = implementation variant.
     * Velocities and forces of atoms not owned by this domain are zeroed
     * first so they do not contribute to this domain's integration. */
    int i;

    for(i=0;i<_NP;i++)
        if(domainID[i]!=myDomain)
        {
            _VSR[i].clear();
            _F[i].clear();
        }

    switch(algorithm_id) {
    case(10000): NVE_Gear6(); break;
    case(20000): NVT_Gear6(); break;
    case(30000): NPH_Gear6(); break;
    case(40000): NPT_Gear6(); break;
    case(10100): NVE_VVerlet(curstep); break;
    case(20100): NVT_VVerlet_Implicit(curstep); break;
    case(20101): NVT_VVerlet_Explicit_1(curstep); break;
    case(20102): NVT_VVerlet_Explicit_2(curstep); break;
    case(30100): NPH_VVerlet(curstep); break;
    case(40100): NPT_VVerlet_Implicit(curstep); break;
    case(40101): NPT_VVerlet_Explicit_1(curstep); break;
    case(40102): NPT_VVerlet_Explicit_2(curstep); break;
    default: FATAL("unknown algorithm_id ("<<algorithm_id<<")"); break;
    }

}
00721
00722
00723 void MDPARALLELFrame::Comm_Neighbor_Domains_Atoms()
00724 {
00725 int i, n, idom, yourDomain, yourIX, yourIY, yourIZ;
00726 int *nsend, *nrecv; double **inBuf, **outBuf;
00727 double dsx, dsy, dsz, sx0, sy0, sz0, sxw, syw, szw;
00728 Vector3 si, h;
00729
00730 h=_H.height();
00731 dsx = fabs(1.4*_RLIST/h[0]);
00732 dsy = fabs(1.4*_RLIST/h[1]);
00733 dsz = fabs(1.4*_RLIST/h[2]);
00734
00735 nsend = (int *) malloc(numNeighDoms*sizeof(int));
00736 nrecv = (int *) malloc(numNeighDoms*sizeof(int));
00737 inBuf = (double **) malloc(numNeighDoms*sizeof(double *));
00738 outBuf= (double **) malloc(numNeighDoms*sizeof(double *));
00739
00740
00741 for(idom=0;idom<numNeighDoms;idom++)
00742 {
00743 yourDomain = neighDoms[idom];
00744 MPI_Irecv(&nrecv[idom],1,MPI_INT,yourDomain,
00745 MSG_SKIN_LEN,MPI_COMM_WORLD,&inRequests[idom]);
00746 }
00747 for(idom=0;idom<numNeighDoms;idom++)
00748 {
00749 yourDomain = neighDoms[idom];
00750 yourIZ = yourDomain % nZdoms;
00751 yourIY = ((yourDomain-myIZ)/nZdoms) % nYdoms;
00752 yourIX = ((yourDomain-myIZ)/nZdoms - myIY) / nYdoms;
00753
00754 sx0 = (yourIX+0.5)/nXdoms-0.5; sxw = 0.5/nXdoms;
00755 sy0 = (yourIY+0.5)/nYdoms-0.5; syw = 0.5/nYdoms;
00756 sz0 = (yourIZ+0.5)/nZdoms-0.5; szw = 0.5/nZdoms;
00757
00758 nsend[idom]=0;
00759 for(i=0;i<_NP;i++)
00760 {
00761 if(fixed[i]==-1) continue;
00762 if(domainID[i]!=myDomain) continue;
00763
00764 si=_SR[i];
00765 si.x-=sx0; si.y-=sy0; si.z-=sz0;
00766 si.subint();
00767
00768
00769 if((si.x >= -sxw-dsx)&&(si.x < sxw+dsx)&&
00770 (si.y >= -syw-dsy)&&(si.y < syw+dsy)&&
00771 (si.z >= -szw-dsz)&&(si.z < szw+dsz))
00772 {
00773 nsend[idom] ++;
00774 }
00775 }
00776
00777 MPI_Isend(&nsend[idom],1,MPI_INT,yourDomain,
00778 MSG_SKIN_LEN,MPI_COMM_WORLD,&outRequests[idom]);
00779
00780 outBuf[idom] = (double *) malloc(nsend[idom]*7*sizeof(double));
00781 n = 0;
00782 for(i=0;i<_NP;i++)
00783 {
00784 if(fixed[i]==-1) continue;
00785 if(domainID[i]!=myDomain) continue;
00786
00787 si=_SR[i];
00788 si.x-=sx0; si.y-=sy0; si.z-=sz0;
00789 si.subint();
00790
00791
00792 if((si.x >= -sxw-dsx)&&(si.x < sxw+dsx)&&
00793 (si.y >= -syw-dsy)&&(si.y < syw+dsy)&&
00794 (si.z >= -szw-dsz)&&(si.z < szw+dsz))
00795 {
00796 outBuf[idom][n*7+0] = (double) i;
00797 outBuf[idom][n*7+1] = _SR[i].x;
00798 outBuf[idom][n*7+2] = _SR[i].y;
00799 outBuf[idom][n*7+3] = _SR[i].z;
00800 outBuf[idom][n*7+4] = _VSR[i].x;
00801 outBuf[idom][n*7+5] = _VSR[i].y;
00802 outBuf[idom][n*7+6] = _VSR[i].z;
00803 n++;
00804 }
00805 }
00806 if(n!=nsend[idom])
00807 FATAL("n="<<n<<" nsend["<<idom<<"]="<<nsend[idom]);
00808 }
00809 MPI_Waitall(numNeighDoms, outRequests,outStatus);
00810 MPI_Waitall(numNeighDoms, inRequests, inStatus );
00811
00812
00813
00814
00815
00816
00817 for(idom=0;idom<numNeighDoms;idom++)
00818 {
00819 yourDomain = neighDoms[idom];
00820 inBuf[idom] = (double *) malloc(nrecv[idom]*7*sizeof(double));
00821 MPI_Irecv(inBuf[idom],nrecv[idom]*7,MPI_DOUBLE,yourDomain,
00822 MSG_SKIN,MPI_COMM_WORLD,&inRequests[idom]);
00823 }
00824
00825 for(idom=0;idom<numNeighDoms;idom++)
00826 {
00827 yourDomain = neighDoms[idom];
00828
00829
00830 MPI_Isend(outBuf[idom],nsend[idom]*7,MPI_DOUBLE,yourDomain,
00831 MSG_SKIN,MPI_COMM_WORLD,&outRequests[idom]);
00832 }
00833
00834 MPI_Waitall(numNeighDoms, outRequests,outStatus);
00835
00836 MPI_Waitall(numNeighDoms, inRequests, inStatus );
00837
00838
00839
00840 for(idom=0;idom<numNeighDoms;idom++)
00841 {
00842
00843 for(n=0;n<nrecv[idom];n++)
00844 {
00845 i = (int) inBuf[idom][n*7+0];
00846 _SR[i].x = inBuf[idom][n*7+1];
00847 _SR[i].y = inBuf[idom][n*7+2];
00848 _SR[i].z = inBuf[idom][n*7+3];
00849 _VSR[i].x = inBuf[idom][n*7+4];
00850 _VSR[i].y = inBuf[idom][n*7+5];
00851 _VSR[i].z = inBuf[idom][n*7+6];
00852 }
00853 }
00854
00855 for(i=0;i<numNeighDoms;i++) free(inBuf [i]);
00856 for(i=0;i<numNeighDoms;i++) free(outBuf[i]);
00857 free(inBuf);
00858 free(outBuf);
00859 free(nsend);
00860 free(nrecv);
00861
00862 }
00863
void MDPARALLELFrame::Mark_Local_Atoms()
{
    /* Recompute atom ownership and skin membership for this domain:
     *   - domainID[i] = domain whose slab contains atom i,
     *   - atoms outside this domain's slab *plus* the skin margin
     *     (~1.4*_RLIST) are marked fixed = -1 (ignored locally).
     * Fatal error if an atom this domain owns lies outside its own
     * slab+skin -- that means atoms moved more than the skin width since
     * the last exchange and the decomposition is no longer valid. */
    int i, ix, iy, iz, np0, np1;
    double dsx, dsy, dsz, sx0, sy0, sz0, sxw, syw, szw;
    Vector3 si, h;

    /* skin half-widths in scaled coordinates */
    h=_H.height();
    dsx = fabs(1.4*_RLIST/h[0]);
    dsy = fabs(1.4*_RLIST/h[1]);
    dsz = fabs(1.4*_RLIST/h[2]);

    /* center and half-width of this domain's slab (scaled coords) */
    sx0 = (myIX+0.5)/nXdoms-0.5; sxw = 0.5/nXdoms;
    sy0 = (myIY+0.5)/nYdoms-0.5; syw = 0.5/nYdoms;
    sz0 = (myIZ+0.5)/nZdoms-0.5; szw = 0.5/nZdoms;

    np0=0; np1=0;   /* counters: owned (core) atoms / core+skin atoms */
    for(i=0;i<_NP;i++)
    {
        /* reset previous skin marking before re-classifying */
        if(fixed[i]==-1) fixed[i]=0;
        si=_SR[i]; si.subint();
        ix = (int) floor((si.x+0.5)*nXdoms);
        iy = (int) floor((si.y+0.5)*nYdoms);
        iz = (int) floor((si.z+0.5)*nZdoms);

        /* same linearization as Partition_Domains' stencil */
        domainID[i] = (ix*nYdoms + iy)*nZdoms + iz;

        /* position relative to this domain's slab center, wrapped */
        si.x-=sx0; si.y-=sy0; si.z-=sz0;
        si.subint();
        if((si.x < -sxw-dsx)||(si.x >= sxw+dsx)||
           (si.y < -syw-dsy)||(si.y >= syw+dsy)||
           (si.z < -szw-dsz)||(si.z >= szw+dsz))
        {
            fixed[i] = -1;     /* outside slab+skin: not relevant here */
            if(domainID[i]==myDomain)
            {
                INFO_Printf("[%d]: atom i (%e %e %e) si (%e %e %e) fixed = %d\n",
                            myDomain,_SR[i].x,_SR[i].y,_SR[i].z,si.x,si.y,si.z,fixed[i]);
                FATAL("core atoms outside skin!");
            }
        }
        if(domainID[i]==myDomain) np0++;
        if(fixed[i]!=-1) np1++;

    }
    INFO_Printf("[%d]: number of core atoms = %d core + skin atoms = %d\n",
                myDomain,np0,np1);
}
00917
void MDPARALLELFrame::AllocChain_parallel()
{
    /* Prepare NEB chain storage on every rank: broadcast the chain
     * parameters from the master, then allocate the per-replica arrays
     * (_Rc0/1/2 = this, previous, next replica positions; _Fc0 = forces;
     * _Tan = tangent), sized by the number of constrained atoms.
     * Collective: all ranks must call this together. */
    int n, size;

    INFO_Printf("AllocChain_parallel[%d]: \n", myDomain);

    MPI_Bcast(constrainatoms, MAXCONSTRAINATOMS, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(nebspec, 10, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&_TIMESTEP, 1, MPI_DOUBLE,0,MPI_COMM_WORLD);
    MPI_Bcast(&_CHAINLENGTH, 1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&totalsteps, 1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&curstep, 1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&printfreq, 1, MPI_INT, 0,MPI_COMM_WORLD);

    MPI_Bcast(&_NIMAGES, 1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&_TORSIONSIM, 1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&_BENDSIM, 1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&_ENABLE_SWITCH,1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&_constrainedMD,1, MPI_INT, 0,MPI_COMM_WORLD);
    MPI_Bcast(&_ENABLE_FEXT, 1, MPI_INT, 0,MPI_COMM_WORLD);

    /* constrainatoms[0] = number of constrained atoms; indices follow */
    n = constrainatoms[0];
    size = sizeof(Vector3)*n;
    /* allocate once; repeated calls reuse the existing buffers */
    if (_Rc0==NULL) _Rc0 = (Vector3 *) malloc(size);
    if (_Rc1==NULL) _Rc1 = (Vector3 *) malloc(size);
    if (_Rc2==NULL) _Rc2 = (Vector3 *) malloc(size);
    if (_Fc0==NULL) _Fc0 = (Vector3 *) malloc(size);
    if (_Tan==NULL) _Tan = (Vector3 *) malloc(size);

    memset(_Rc0,0,size);
    memset(_Rc1,0,size);
    memset(_Rc2,0,size);
    memset(_Fc0,0,size);
    memset(_Tan,0,size);

    /* one interpolation coordinate per replica (0..1 along the chain) */
    Realloc(_nebinterp,double,(_CHAINLENGTH+1));
    memset(_nebinterp,0,sizeof(double)*(_CHAINLENGTH+1));
}
00957
void MDPARALLELFrame::initRchain_parallel()
{
    /* Initialize each NEB replica by linear interpolation between the two
     * end configurations _SR1 (config1) and _SR2 (config2).  Each rank
     * holds one replica at fraction s = _nebinterp[myDomain] along the
     * chain; its constrained-atom positions are stored in real space in
     * _Rc0.  Collective: all ranks must call this together. */
    int i, j, n, ipt;
    double s;
    Vector3 ds;

    Broadcast_Atoms();

    SHtoR();

    if(myDomain==0)
    {
        /* master must already have both end states loaded.
         * NOTE(review): if this returns early the slaves still proceed to
         * the MPI_Bcast below and would hang -- confirm callers guarantee
         * _SR1/_SR2 are set before invoking this command. */
        if(_SR1 == NULL || _SR2 == NULL)
        {
            if(_SR1 == NULL)
                ERROR("config1 is not set!");
            else
                ERROR("config2 is not set!");
            return;
        }

    }
    else
    {
        /* slaves allocate receive buffers for the two end states */
        Free(_SR1); Realloc(_SR1,Vector3,_NP);
        Free(_SR2); Realloc(_SR2,Vector3,_NP);
    }

    MPI_Bcast(_SR1, _NP*3,MPI_DOUBLE,0,MPI_COMM_WORLD);
    MPI_Bcast(_SR2, _NP*3,MPI_DOUBLE,0,MPI_COMM_WORLD);

    /* evenly spaced interpolation fractions along the chain */
    for(j=0;j<=_CHAINLENGTH;j++)
        _nebinterp[j] = (1.0*j)/_CHAINLENGTH;

    s = _nebinterp[myDomain];
    interpCN(s);    /* interpolate full configuration at fraction s */

    /* center-of-mass shift between the end states, for drift removal */
    caldscom12();
    INFO_Printf("dscom12[%d] = (%f %f %f)\n",myDomain,dscom12.x,dscom12.y,dscom12.z);

    /* interpolate the constrained atoms and store real-space positions */
    n = constrainatoms[0];
    for(i=0;i<n;i++)
    {
        ipt=constrainatoms[i+1];
        ds=_SR2[ipt]-dscom12; ds-=_SR1[ipt]; ds.subint();  /* shortest image */
        ds*=s; ds+=_SR1[ipt];
        _Rc0[i]=_H*ds;
    }

}
01013
void MDPARALLELFrame::nebrelax_parallel()
{
    /* Nudged-elastic-band relaxation with one replica per MPI rank.
     * Each iteration:
     *   1. write this replica's constrained atoms (_Rc0) into _SR and
     *      evaluate energy/forces (optionally relaxing the other atoms),
     *   2. share replica energies via Allreduce,
     *   3. exchange replica positions with chain neighbors (rank-1/rank+1)
     *      to obtain _Rc1/_Rc2,
     *   4. build the (energy-weighted) tangent, project out the parallel
     *      force component, add the spring force, and move _Rc0 downhill
     *      by a simple Euler step of size _TIMESTEP.
     * End replicas (rank 0 and rank _CHAINLENGTH) stay fixed.
     * Collective: all ranks must call this together; requires
     * numDomains == _CHAINLENGTH+1 and AllocChain_parallel() done. */
    int n, size, i, ipt, j, nin, nout;
    double s, dEmax, dEmin, r2, fr, fm2, minFm;
    double Fspring, springK, dR1norm, dR1norm2, dR2norm, dR2norm2, TanMag2;
    double *Ec, *Ec_global, *Fm, *Fm_global;
    Vector3 ds0, ds, dR1, dR2;
    Matrix33 hinv;
    FILE *fp;

    INFO_Printf("nebrelax[%d]: NEB Relax Parallel\n", myDomain);

    if( numDomains != (_CHAINLENGTH+1) )
    {
        ERROR("number of cpus("<<numDomains<<") does not match chainlength("<<_CHAINLENGTH<<")+1");
        return;
    }

    s = _nebinterp[myDomain];
    hinv = _H.inv();
    n = constrainatoms[0];

    /* per-replica energy and residual-force-magnitude arrays; the *_global
     * twins are Allreduce receive buffers */
    Ec=(double *)malloc(sizeof(double)*(_CHAINLENGTH+1));
    Ec_global=(double *)malloc(sizeof(double)*(_CHAINLENGTH+1));
    Fm=(double *)malloc(sizeof(double)*(_CHAINLENGTH+1));
    Fm_global=(double *)malloc(sizeof(double)*(_CHAINLENGTH+1));

    /* only the master writes the energy-profile history file */
    if(myDomain==0)
    {
        fp=fopen("nebeng.out","w");
    }

    /* squared end-to-end path length (diagnostic) */
    constrain_dist=0;
    for(i=0;i<n;i++)
    {
        ipt=constrainatoms[i+1];
        ds0=(_SR2[ipt]-dscom12)-_SR1[ipt];
        constrain_dist+=ds0.norm2();
    }

    INFO_Printf("nebrelax[%d]: constrain_dist=%e s=%e\n", myDomain, constrain_dist, s);

    /* nebspec[0]==1: relax unconstrained atoms each step, so pin the
     * constrained atoms during that relaxation */
    if(nebspec[0]==1)
    {
        for(i=0;i<n;i++)
        {
            ipt=constrainatoms[i+1];
            fixed[ipt]=1;
        }
    }

    /* ---------------- main NEB iteration loop ---------------- */
    step0 = curstep;
    for(curstep=step0;curstep<(step0 + totalsteps);curstep++)
    {
        /* impose this replica's constrained-atom positions */
        for(i=0;i<n;i++)
        {
            ipt=constrainatoms[i+1];
            _SR[ipt]=hinv*_Rc0[i];
        }

        if(nebspec[0]==1) relax();

        MDFrame::call_potential();

        /* gather all replica energies (each rank contributes one slot) */
        memset(Ec,0,sizeof(double)*(_CHAINLENGTH+1));
        Ec[myDomain]=_EPOT;
        MPI_Allreduce(Ec,Ec_global,(_CHAINLENGTH+1),MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
        memcpy(Ec,Ec_global,sizeof(double)*(_CHAINLENGTH+1));

        /* copy forces on constrained atoms into the chain array */
        for(i=0;i<n;i++)
        {
            ipt=constrainatoms[i+1];
            _Fc0[i]=_F[ipt];
        }

        /* exchange replica positions with chain neighbors:
         * _Rc1 <- previous replica (rank-1), _Rc2 <- next replica (rank+1) */
        nin = nout = 0;
        if(myDomain>0)
        {
            MPI_Irecv(_Rc1,n*3,MPI_DOUBLE,myDomain-1,
                      MSG_NEB_ATOM,MPI_COMM_WORLD,&inRequests[nin]);
            nin ++;
            MPI_Isend(_Rc0,n*3,MPI_DOUBLE,myDomain-1,
                      MSG_NEB_ATOM,MPI_COMM_WORLD,&outRequests[nout]);
            nout ++;
        }
        if(myDomain<_CHAINLENGTH)
        {
            MPI_Irecv(_Rc2,n*3,MPI_DOUBLE,myDomain+1,
                      MSG_NEB_ATOM,MPI_COMM_WORLD,&inRequests[nin]);
            nin ++;
            MPI_Isend(_Rc0,n*3,MPI_DOUBLE,myDomain+1,
                      MSG_NEB_ATOM,MPI_COMM_WORLD,&outRequests[nout]);
            nout ++;
        }
        MPI_Waitall(nout, outRequests,outStatus);
        MPI_Waitall(nin, inRequests, inStatus );

        /* tangent estimate for interior replicas.  At a local extremum
         * along the chain (both neighbors higher or both lower), blend the
         * two segment vectors weighted by the energy differences; otherwise
         * use the segment toward the higher-energy neighbor. */
        if((myDomain>0)&&(myDomain<_CHAINLENGTH))
        {
            if (((Ec[myDomain+1]-Ec[myDomain]>0) && (Ec[myDomain-1]-Ec[myDomain]>0)) ||
                ((Ec[myDomain+1]-Ec[myDomain]<0) && (Ec[myDomain-1]-Ec[myDomain]<0)))
            {
                dEmax = max(fabs(Ec[myDomain+1]-Ec[myDomain]),fabs(Ec[myDomain-1]-Ec[myDomain]));
                dEmin = min(fabs(Ec[myDomain+1]-Ec[myDomain]),fabs(Ec[myDomain-1]-Ec[myDomain]));

                if (Ec[myDomain+1]>Ec[myDomain-1])
                {
                    for(i=0;i<n;i++)
                    {
                        _Tan[i] = (_Rc2[i]-_Rc0[i])*dEmax + (_Rc0[i]-_Rc1[i])*dEmin;
                    }
                }
                else
                {
                    for(i=0;i<n;i++)
                    {
                        _Tan[i] = (_Rc2[i]-_Rc0[i])*dEmin + (_Rc0[i]-_Rc1[i])*dEmax;
                    }
                }
            }
            else
            {
                if (Ec[myDomain+1]>Ec[myDomain-1])
                {
                    for(i=0;i<n;i++)
                    {
                        _Tan[i] = _Rc2[i]-_Rc0[i];
                    }
                }
                else
                {
                    for(i=0;i<n;i++)
                    {
                        _Tan[i] = _Rc0[i]-_Rc1[i];
                    }
                }
            }

            TanMag2 = 0;
            for(i=0;i<n;i++)
            {
                TanMag2 += _Tan[i].norm2();
            }
        }

        /* project out the force component parallel to the tangent and
         * record the residual (perpendicular) force magnitude */
        memset(Fm,0,sizeof(double)*(_CHAINLENGTH+1));
        if((myDomain>0)&&(myDomain<_CHAINLENGTH))
        {
            fr=0;
            for(i=0;i<n;i++)
                fr+=dot(_Fc0[i],_Tan[i]);
            fr /= TanMag2;

            fm2=0;
            for(i=0;i<n;i++)
            {
                _Fc0[i] -= (_Tan[i] * fr);
                fm2 += _Fc0[i].norm2();
            }
            Fm[myDomain] = sqrt(fm2);
        }
        else
        {
            Fm[myDomain] = 0;   /* end replicas do not move */
        }

        /* share residual magnitudes; derive the spring constant from the
         * smallest interior residual (adaptive stiffness) */
        MPI_Allreduce(Fm,Fm_global,(_CHAINLENGTH+1),MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
        memcpy(Fm,Fm_global,sizeof(double)*(_CHAINLENGTH+1));
        minFm = Fm[1];
        for(j=2;j<_CHAINLENGTH;j++) minFm = min(minFm,Fm[j]);

        springK = minFm*_CHAINLENGTH;
        if((myDomain>0)&&(myDomain<_CHAINLENGTH))
        {
            /* distances to next (dR2) and previous (dR1) replica */
            dR2norm2=0;
            for(i=0;i<n;i++)
            {
                dR2=_Rc2[i]-_Rc0[i];
                dR2norm2+=dR2.norm2();
            }
            dR2norm=sqrt(dR2norm2);

            dR1norm2=0;
            for(i=0;i<n;i++)
            {
                dR1=_Rc0[i]-_Rc1[i];
                dR1norm2+=dR1.norm2();
            }
            dR1norm=sqrt(dR1norm2);

            /* spring force acts along the tangent, equalizing spacing */
            Fspring = springK*(dR2norm - dR1norm);
            for(i=0;i<n;i++)
                _Fc0[i] += _Tan[i] * (Fspring/sqrt(TanMag2)) ;
        }

        /* periodic reporting of the energy profile (master only) */
        if(myDomain==0)
        {
            if(curstep%printfreq==0)
            {
                INFO_Printf("curstep = %d\n",curstep);
                for(j=0;j<=_CHAINLENGTH;j++)
                    INFO_Printf("%20.12e %25.15e %25.15e \n",_nebinterp[j],Ec[j]-Ec[0], Fm[j]);
                fprintf(fp,"%d ",curstep);
                for(j=0;j<=_CHAINLENGTH;j++)
                    fprintf(fp,"%20.12e %25.15e %25.15e ",_nebinterp[j], Ec[j]-Ec[0], Fm[j]);
                fprintf(fp,"\n");
                fflush(fp);
            }
        }

        /* move interior replicas downhill (steepest descent / Euler step) */
        if((myDomain>0)&&(myDomain<_CHAINLENGTH))
        {
            for(i=0;i<n;i++)
            {
                _Rc0[i]+=_Fc0[i]*_TIMESTEP;
            }
        }
    }

    /* final report (same format as the in-loop one) */
    if(myDomain==0)
    {
        if(curstep%printfreq==0)
        {
            INFO_Printf("curstep = %d\n",curstep);
            for(j=0;j<=_CHAINLENGTH;j++)
                INFO_Printf("%20.12e %25.15e %25.15e \n",_nebinterp[j],Ec[j]-Ec[0], Fm[j]);
            fprintf(fp,"%d ",curstep);
            for(j=0;j<=_CHAINLENGTH;j++)
                fprintf(fp,"%20.12e %25.15e %25.15e ",_nebinterp[j], Ec[j]-Ec[0], Fm[j]);
            fprintf(fp,"\n");
            fflush(fp);
        }
    }

    free(Ec);
    free(Ec_global);
    free(Fm);
    free(Fm_global);

    if(myDomain==0) fclose(fp);

    INFO_Printf("nebrelax[%d]: exit\n",myDomain);

}
01282
01283
01284 int MDPARALLELFrame::readRchain_parallel()
01285 {
01286 int i;
01287 char fname[200], fullname[200];
01288 char *buffer; char *pp, *q;
01289 FILE *fp;
01290
01291 MPI_Bcast(incnfile, 200, MPI_CHAR, 0,MPI_COMM_WORLD);
01292
01293 sprintf(fname,"%s.cpu%02d",incnfile,myDomain);
01294 LFile::SubHomeDir(fname, fullname);
01295 INFO("filename="<<fullname);
01296 LFile::LoadToString(fullname,buffer,0);
01297
01298 pp=buffer;
01299
01300 q=pp; pp=strchr(pp,'\n'); if(pp) *(char *)pp++=0;
01301 sscanf(q, "%d", &_CHAINLENGTH);
01302
01303 if( numDomains != (_CHAINLENGTH+1) )
01304 {
01305 ERROR("number of cpus("<<numDomains<<") does not match chainlength("<<_CHAINLENGTH<<")+1");
01306 return -1;
01307 }
01308
01309 q=pp; pp=strchr(pp,'\n'); if(pp) *(char *)pp++=0;
01310 sscanf(q, "%d", constrainatoms);
01311
01312 INFO("constrainatoms[0]="<<constrainatoms[0]);
01313
01314 if (_Rc0==NULL)
01315 {
01316 ERROR("need to call AllocChain_parallel() beforehand");
01317 return -1;
01318 }
01319
01320 for(i=0;i<constrainatoms[0];i++)
01321 {
01322 q=pp; pp=strchr(pp,'\n'); if(pp) *(char *)pp++=0;
01323 sscanf(q, "%d", constrainatoms+i+1);
01324 }
01325
01326 for(i=0;i<constrainatoms[0];i++)
01327 {
01328 q=pp; pp=strchr(pp,'\n'); if(pp) *(char *)pp++=0;
01329 sscanf(q, "%lf %lf %lf",&(_Rc0[i].x),&(_Rc0[i].y),&(_Rc0[i].z));
01330 }
01331
01332 Free(buffer);
01333 INFO_Printf("readRchain_parallel[%d] finished\n",myDomain);
01334 return 0;
01335 }
01336
01337
01338 int MDPARALLELFrame::writeRchain_parallel()
01339 {
01340 int i;
01341 char fname[200], fullname[200];
01342 FILE *fp;
01343
01344 MPI_Bcast(finalcnfile, 200, MPI_CHAR, 0,MPI_COMM_WORLD);
01345
01346 sprintf(fname,"%s.cpu%02d",finalcnfile,myDomain);
01347 LFile::SubHomeDir(fname, fullname);
01348
01349 fp=fopen(fullname,"w");
01350
01351 fprintf(fp,"%d\n",_CHAINLENGTH);
01352 fprintf(fp,"%d\n",constrainatoms[0]);
01353
01354 for(i=0;i<constrainatoms[0];i++)
01355 fprintf(fp,"%d\n",constrainatoms[i+1]);
01356
01357 for(i=0;i<constrainatoms[0];i++)
01358 fprintf(fp,"%25.15e %25.15e %25.15e\n",_Rc0[i].x,_Rc0[i].y,_Rc0[i].z);
01359
01360 fclose(fp);
01361
01362 INFO_Printf("writeRchain_parallel[%d] finished\n",myDomain);
01363 return 0;
01364 }
01365
01366 void MDPARALLELFrame::copyRchaintoCN_parallel()
01367 {
01368 int i, n, ipt;
01369 Matrix33 hinv;
01370
01371 hinv = _H.inv();
01372 n=constrainatoms[0];
01373 for(i=0;i<n;i++)
01374 {
01375 ipt=constrainatoms[i+1];
01376 _SR[ipt]=hinv*_Rc0[i];
01377 }
01378 INFO_Printf("copyRchaintoCN_parallel[%d] finished\n",myDomain);
01379 }
01380
01381 int MDPARALLELFrame::writefinalcnfile_parallel(int zip, bool bg)
01382 {
01383 char fname[200];
01384
01385 MPI_Bcast(finalcnfile, 200, MPI_CHAR, 0,MPI_COMM_WORLD);
01386 sprintf(fname,"%s.cpu%02d",finalcnfile,myDomain);
01387
01388 MDFrame::finalcn.open(fname);
01389 return MDFrame::finalcn.write(this,zip,bg);
01390 INFO_Printf("writefinalcnfile_parallel[%d] finished\n",myDomain);
01391 }
01392
01393 #endif
01394
01395
01396
01397
01398
01399
01400
01401
01402
01403
01404
01405
01406
01407 #ifdef _TEST
01408
01409 #ifdef _PARALLEL
01410 #define MDFrame MDPARALLELFrame
01411 #endif
01412
01413 class TESTMD : public MDFrame
01414 {
01415 public:
01416 void potential()
01417 {
01418
01419
01420 INFO("Empty potential");
01421 }
01422 virtual void initvars()
01423 {
01424 MDFrame::initvars();
01425 _RLIST=3.8;
01426 }
01427
01428 };
01429
01430 class TESTMD sim;
01431
01432
01433 #include "main.cpp"
01434
01435 #endif//_TEST