File: lal_yukawa_colloid.h

/***************************************************************************
                              yukawa_colloid.h
                             -------------------
                            Trung Dac Nguyen (ORNL)

  Class for acceleration of the yukawa/colloid pair style.

 __________________________________________________________________________
    This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
 __________________________________________________________________________

    begin                :
    email                : nguyentd@ornl.gov
 ***************************************************************************/

#ifndef LAL_YUKAWA_COLLOID_H
#define LAL_YUKAWA_COLLOID_H

#include "lal_base_atomic.h"

namespace LAMMPS_AL {

template <class numtyp, class acctyp>
class YukawaColloid : public BaseAtomic<numtyp, acctyp> {
 public:
  YukawaColloid();
  ~YukawaColloid();

  /// Clear any previous data and set up for a new LAMMPS run
  /** \param max_nbors initial number of rows in the neighbor matrix
    * \param cell_size cutoff + skin
    * \param gpu_split fraction of particles handled by device
    *
    * Returns:
    * -  0 if successful
    * - -1 if fix gpu not found
    * - -3 if there is an out of memory error
    * - -4 if the GPU library was not compiled for the GPU
    * - -5 if double precision is not supported on the card **/
  int init(const int ntypes, double **host_cutsq,
           double **host_a, double **host_offset, double *host_special_lj,
           const int nlocal, const int nall, const int max_nbors,
           const int maxspecial, const double cell_size,
           const double gpu_split, FILE *screen, const double kappa);
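
  /** Usage sketch (an illustrative assumption, not code from this library):
    * a host-side caller might set up the accelerated pair style roughly as
    * below, where the host_* arrays, the counts, and the `screen` FILE
    * pointer are hypothetical placeholders named after the parameters above
    * and the <float,float> precision is an arbitrary choice.
    *
    *   YukawaColloid<float,float> yc;
    *   int err = yc.init(ntypes, host_cutsq, host_a, host_offset,
    *                     host_special_lj, nlocal, nall, max_nbors,
    *                     maxspecial, cell_size, gpu_split, screen, kappa);
    *   if (err != 0) { ... }  // see the return codes documented above
    **/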

  /// Cast/copy the per-atom colloid radii into the host-side buffer
  inline void cast_rad_data(double* rad) {
    int nall = this->atom->nall();
    if (_shared_view) {
      // Zero-copy path: view the caller's array from host and device
      c_rad.host.view((numtyp*)rad,nall,*(this->ucl_device));
      c_rad.device.view(c_rad.host);
    } else {
      if (sizeof(numtyp)==sizeof(double))
        memcpy(c_rad.host.begin(),rad,nall*sizeof(numtyp));
      else
        // Cast element by element when numtyp is single precision
        for (int i=0; i<nall; i++) c_rad[i]=rad[i];
    }
  }

  /// Copy rad to the device asynchronously
  inline void add_rad_data() {
    c_rad.update_device(this->atom->nall(),true);
  }
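
  /** Staging sketch (an assumption about intended use; the actual call site
    * is in the library's .cpp and may differ): the two helpers above are
    * meant to be used as a pair, e.g.
    *
    *   yc.cast_rad_data(rad);   // stage radii in the host buffer (or view)
    *   yc.add_rad_data();       // queue the asynchronous host->device copy
    *
    * before the radii are consumed by a compute() call below that receives
    * the same `rad` pointer. **/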

  /// Clear all host and device data
  /** \note This is called at the beginning of the init() routine **/
  void clear();

  /// Returns memory usage on device per atom
  int bytes_per_atom(const int max_nbors) const;

  /// Total host memory used by library for pair style
  double host_memory_usage() const;

  /// Pair loop with host neighboring
  void compute(const int f_ago, const int inum_full,
               const int nall, double **host_x, int *host_type,
               int *ilist, int *numj, int **firstneigh,
               const bool eflag, const bool vflag,
               const bool eatom, const bool vatom, int &host_start,
               const double cpu_time, bool &success, double *rad);

  /// Pair loop with device neighboring
  int** compute(const int ago, const int inum_full, const int nall,
                double **host_x, int *host_type, double *sublo,
                double *subhi, tagint *tag, int **nspecial,
                tagint **special, const bool eflag, const bool vflag,
                const bool eatom, const bool vatom, int &host_start,
                int **ilist, int **jnum, const double cpu_time,
                bool &success, double *rad);
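
  /** Dispatch sketch (hypothetical caller logic, not taken from this file):
    * with host neighboring the caller supplies ilist/numj/firstneigh itself
    * and uses the void overload; with device neighboring it passes positions,
    * tags and special-bond data and reads the device-built neighbor list back
    * through the returned pointer and the ilist/jnum output arguments, e.g.
    *
    *   int **firstneigh = yc.compute(ago, inum_full, nall, host_x, host_type,
    *                                 sublo, subhi, tag, nspecial, special,
    *                                 eflag, vflag, eatom, vatom, host_start,
    *                                 &ilist, &jnum, cpu_time, success, rad);
    **/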

  // --------------------------- TEXTURES -----------------------------
  UCL_Texture rad_tex;

  // --------------------------- TYPE DATA --------------------------

  /// coeff.x = a, coeff.y = offset, coeff.z = cutsq
  UCL_D_Vec<numtyp4> coeff;
  /// Special LJ values
  UCL_D_Vec<numtyp> sp_lj;

  /// If atom type constants fit in shared memory, use fast kernels
  bool shared_types;

  /// Number of atom types
  int _lj_types;

  /// Allocated length of the per-atom radius buffer
  int _max_rad_size;

  /// Yukawa screening parameter (kappa)
  numtyp _kappa;

  /// Per-atom arrays
  UCL_Vector<numtyp,numtyp> c_rad;

 private:
  bool _shared_view;
  bool _allocated;
  int loop(const int eflag, const int vflag);
};

}  // namespace LAMMPS_AL

#endif