File: lal_zbl.h

package info (click to toggle)
lammps 20220106.git7586adbb6a%2Bds1-2
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 348,064 kB
  • sloc: cpp: 831,421; python: 24,896; xml: 14,949; f90: 10,845; ansic: 7,967; sh: 4,226; perl: 4,064; fortran: 2,424; makefile: 1,501; objc: 238; lisp: 163; csh: 16; awk: 14; tcl: 6
file content (84 lines) | stat: -rw-r--r-- 2,780 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/***************************************************************************
                                    zbl.h
                             -------------------
                            Trung Dac Nguyen (ORNL)

  Class for acceleration of the zbl pair style.

 __________________________________________________________________________
    This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
 __________________________________________________________________________

    begin                :
    email                : ndactrung@gmail.com
 ***************************************************************************/

#ifndef LAL_ZBL_H
#define LAL_ZBL_H

#include "lal_base_atomic.h"

namespace LAMMPS_AL {

/// GPU/accelerator implementation of the LAMMPS "zbl" pair style
/// (Ziegler-Biersack-Littmark screened nuclear repulsion).
/// \tparam numtyp numeric precision used for per-type coefficient storage
/// \tparam acctyp precision used for force/energy accumulation
template <class numtyp, class acctyp>
class ZBL : public BaseAtomic<numtyp, acctyp> {
 public:
  ZBL();
  ~ZBL();

  /// Clear any previous data and set up for a new LAMMPS run
  /** \param ntypes number of atom types (coefficient tables are ntypes x ntypes)
    * \param host_cutsq per-type-pair squared cutoffs (packed into coeff1.w)
    * \param host_sw1,host_sw2,host_sw3,host_sw4,host_sw5 per-type-pair switching
    *        coefficients (packed into coeff1/coeff3; presumably the ZBL outer-cutoff
    *        switching polynomial -- see pair_style zbl docs)
    * \param host_d1a,host_d2a,host_d3a,host_d4a per-type-pair coefficients packed
    *        into coeff2 (presumably the ZBL screening-function exponential terms)
    * \param host_zze per-type-pair prefactor packed into coeff1.z
    * \param cut_globalsq squared global (outer) cutoff
    * \param cut_innersq squared inner cutoff where switching begins
    * \param cut_inner inner cutoff distance
    * \param nlocal number of local atoms on this proc
    * \param nall total number of local+ghost atoms
    * \param max_nbors initial number of rows in the neighbor matrix
    * \param maxspecial maximum number of special neighbors per atom
    * \param cell_size cutoff + skin
    * \param gpu_split fraction of particles handled by device
    * \param screen file pointer for status/diagnostic output
    *
    * Returns:
    * -  0 if successful
    * - -1 if fix gpu not found
    * - -3 if there is an out of memory error
    * - -4 if the GPU library was not compiled for GPU
    * - -5 Double precision is not supported on card **/
  int init(const int ntypes, double **host_cutsq, double **host_sw1,
           double **host_sw2, double **host_sw3, double **host_sw4, double **host_sw5,
           double **host_d1a, double **host_d2a, double **host_d3a, double **host_d4a,
           double **host_zze, double cut_globalsq, double cut_innersq, double cut_inner,
           const int nlocal, const int nall, const int max_nbors,
           const int maxspecial, const double cell_size,
           const double gpu_split, FILE *screen);

  /// Clear all host and device data
  /** \note This is called at the beginning of the init() routine **/
  void clear();

  /// Returns memory usage on device per atom
  int bytes_per_atom(const int max_nbors) const;

  /// Total host memory used by library for pair style
  double host_memory_usage() const;

  // --------------------------- TYPE DATA --------------------------

  /// coeff1.x = sw1, coeff1.y = sw2, coeff1.z = zze, coeff1.w = cutsq
  UCL_D_Vec<numtyp4> coeff1;
  /// coeff2.x = d1a, coeff2.y = d2a, coeff2.z = d3a, coeff2.w = d4a
  UCL_D_Vec<numtyp4> coeff2;
  /// coeff3.x = sw3, coeff3.y = sw4, coeff3.z = sw5;
  UCL_D_Vec<numtyp4> coeff3;

  /// If atom type constants fit in shared memory, use fast kernels
  bool shared_types;

  /// Squared global (outer) cutoff, as passed to init()
  numtyp _cut_globalsq;
  /// Squared inner cutoff where switching begins, as passed to init()
  numtyp _cut_innersq;
  /// Inner cutoff distance, as passed to init()
  numtyp _cut_inner;

  /// Number of atom types
  int _lj_types;

 private:
  /// True once init() has allocated device coefficient storage; checked by clear()
  bool _allocated;
  /// Launch the pair kernel for one timestep; eflag/vflag select
  /// energy/virial accumulation (implemented in the corresponding .cpp)
  int loop(const int eflag, const int vflag);
};

}

#endif