// **************************************************************************
// pppm.cu
// -------------------
// W. Michael Brown (ORNL)
//
// Device code for PPPM acceleration
//
// __________________________________________________________________________
// This file is part of the LAMMPS Accelerator Library (LAMMPS_AL)
// __________________________________________________________________________
//
// begin :
// email : brownw@ornl.gov
// ***************************************************************************
#if defined(NV_KERNEL) || defined(USE_HIP)
#include "lal_preprocessor.h"
#ifndef _DOUBLE_DOUBLE
_texture( pos_tex,float4);
_texture( q_tex,float);
#else
_texture_2d( pos_tex,int4);
_texture( q_tex,int2);
#endif
// Allow PPPM to compile without atomics for NVIDIA compute 1.0 cards; an
// error is generated at runtime if pppm/gpu is used on such hardware
#if defined(NV_KERNEL) && (__CUDA_ARCH__ < 110)
#define atomicAdd(x,y) *(x)+=0
#endif
#else
#define pos_tex x_
#define q_tex q_
#pragma OPENCL EXTENSION cl_khr_global_int32_base_atomics: enable
#ifdef GRD_DBL
#if defined(cl_amd_fp64)
#pragma OPENCL EXTENSION cl_amd_fp64 : enable
#else
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#endif
#endif
#endif
// Number of threads per pencil for charge spread
#define PENCIL_SIZE MEM_THREADS
// Number of pencils per block for charge spread
#define BLOCK_PENCILS (PPPM_BLOCK_1D/PENCIL_SIZE)
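
// Bin each local atom into its grid cell: counts[cell] holds the number of
// atoms stored for the cell, and ans holds one grdtyp4 per binned atom
// (strided by atom_stride) with the offsets to the cell center in .xyz and
// the scaled charge in .w. *error is set to 1 for an atom outside the local
// brick and to 2 when a cell overflows its max_atoms capacity.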
__kernel void particle_map(const __global numtyp4 *restrict x_,
const __global numtyp *restrict q_,
const grdtyp delvolinv, const int nlocal,
__global int *restrict counts,
__global grdtyp4 *restrict ans,
const grdtyp b_lo_x, const grdtyp b_lo_y,
const grdtyp b_lo_z, const grdtyp delxinv,
const grdtyp delyinv, const grdtyp delzinv,
const int nlocal_x, const int nlocal_y,
const int nlocal_z, const int atom_stride,
const int max_atoms,
__global int *restrict error) {
  // ii indexes the atom that this thread maps onto the grid
int ii=GLOBAL_ID_X;
// Resequence the atom indices to avoid collisions during atomic ops
int nthreads=GLOBAL_SIZE_X;
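  // This maps thread i to roughly atom (i*PPPM_BLOCK_1D) mod nthreads, so
  // threads in the same block touch atoms far apart in memory that are
  // unlikely to fall in the same grid cell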
ii=fast_mul(ii,PPPM_BLOCK_1D);
ii-=(ii/nthreads)*(nthreads-1);
int nx,ny,nz;
if (ii<nlocal) {
numtyp4 p;
fetch4(p,ii,pos_tex);
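    // delta.xyz will hold the atom's offset from its cell center in grid
    // units; delta.w carries the charge scaled by the inverse cell volume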
grdtyp4 delta;
fetch(delta.w,ii,q_tex);
delta.w*=delvolinv;
if (delta.w!=(grdtyp)0.0) {
delta.x=(p.x-b_lo_x)*delxinv;
nx=delta.x;
delta.y=(p.y-b_lo_y)*delyinv;
ny=delta.y;
delta.z=(p.z-b_lo_z)*delzinv;
nz=delta.z;
if (delta.x<(grdtyp)0 || delta.y<(grdtyp)0 || delta.z<(grdtyp)0 ||
nx>=nlocal_x || ny>=nlocal_y || nz>=nlocal_z)
*error=1;
else {
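        // Offset from the atom to the center of its cell, in (-0.5,0.5]
        // grid units; consumed by the spline evaluation in make_rho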
delta.x=nx+(grdtyp)0.5-delta.x;
delta.y=ny+(grdtyp)0.5-delta.y;
delta.z=nz+(grdtyp)0.5-delta.z;
int i=nz*nlocal_y*nlocal_x+ny*nlocal_x+nx;
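        // Reserve a slot in the cell's atom list with an atomic increment;
        // back out and flag an overflow error if the cell is already full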
int old=atom_add(counts+i, 1);
if (old>=max_atoms) {
*error=2;
atom_add(counts+i, -1);
} else
ans[atom_stride*old+i]=delta;
}
}
}
}
/* --------------------------- */
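// Spread the binned charges onto the density brick. Each block processes
// BLOCK_PENCILS pencils; a pencil of PENCIL_SIZE threads sweeps one (y,z)
// row of the brick in tiles along x. For each grid point, the atoms binned
// in nearby cells (gathered over the y/z stencil) contribute "order" spline
// weights along x; these are staged in shared memory and folded into a
// sliding window before being written out to the brick.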
__kernel void make_rho(const __global int *restrict counts,
const __global grdtyp4 *restrict atoms,
__global grdtyp *restrict brick,
const __global grdtyp *restrict _rho_coeff,
const int atom_stride, const int npts_x,
const int npts_y, const int npts_z, const int nlocal_x,
const int nlocal_y, const int nlocal_z,
const int order_m_1, const int order, const int order2) {
  // Cached spline coefficients
  __local grdtyp rho_coeff[PPPM_MAX_SPLINE*PPPM_MAX_SPLINE];
  // Sliding accumulation window per pencil, with a spline-width halo
  __local grdtyp front[BLOCK_PENCILS][PENCIL_SIZE+PPPM_MAX_SPLINE];
  // Per-thread spline contributions for each of the "order" x offsets
  __local grdtyp ans[PPPM_MAX_SPLINE][PPPM_BLOCK_1D];
int tid=THREAD_ID_X;
if (tid<order2+order)
rho_coeff[tid]=_rho_coeff[tid];
int pid=tid/PENCIL_SIZE;
int fid=tid%PENCIL_SIZE;
int fid_halo=PENCIL_SIZE+fid;
if (fid<order)
front[pid][fid_halo]=(grdtyp)0.0;
__syncthreads();
int bt=BLOCK_ID_X*BLOCK_PENCILS+pid;
int ny=bt%npts_y;
int nz=bt/npts_y;
int y_start=0;
int z_start=0;
int y_stop=order;
int z_stop=order;
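  // Clamp the y/z stencil for pencils near the brick boundaries so that
  // cells outside the local grid are skipped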
if (ny<order_m_1)
y_start=order_m_1-ny;
if (nz<order_m_1)
z_start=order_m_1-nz;
if (ny>=nlocal_y)
y_stop-=ny-nlocal_y+1;
if (nz>=nlocal_z)
z_stop-=nz-nlocal_z+1;
int z_stride=fast_mul(nlocal_x,nlocal_y);
int loop_count=npts_x/PENCIL_SIZE+1;
int nx=fid;
int pt=fast_mul(nz,fast_mul(npts_y,npts_x))+fast_mul(ny,npts_x)+nx;
for (int i=0 ; i<loop_count; i++) {
for (int n=0; n<order; n++)
ans[n][tid]=(grdtyp)0.0;
if (nx<nlocal_x && nz<npts_z) {
int z_pos=fast_mul(nz+z_start-order_m_1,z_stride);
for (int m=z_start; m<z_stop; m++) {
int y_pos=fast_mul(ny+y_start-order_m_1,nlocal_x);
for (int l=y_start; l<y_stop; l++) {
int pos=z_pos+y_pos+nx;
int natoms=fast_mul(counts[pos],atom_stride);
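          // Slot j of this cell lives at atoms[j*atom_stride+pos]; provided
          // pos < atom_stride (the stride covers all cells), this loop
          // visits exactly counts[pos] slots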
for (int row=pos; row<natoms; row+=atom_stride) {
grdtyp4 delta=atoms[row];
grdtyp rho1d_1=(grdtyp)0.0;
grdtyp rho1d_2=(grdtyp)0.0;
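            // Horner evaluation of the y and z spline weights; rho_coeff
            // stores the polynomial coefficients with stride "order"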
for (int k=order2+order-1; k > -1; k-=order) {
rho1d_1=rho_coeff[k-l]+rho1d_1*delta.y;
rho1d_2=rho_coeff[k-m]+rho1d_2*delta.z;
}
delta.w*=rho1d_1*rho1d_2;
for (int n=0; n<order; n++) {
grdtyp rho1d_0=(grdtyp)0.0;
for (int k=order2+n; k>=n; k-=order)
rho1d_0=rho_coeff[k]+rho1d_0*delta.x;
ans[n][tid]+=delta.w*rho1d_0;
}
}
y_pos+=nlocal_x;
}
z_pos+=z_stride;
}
}
__syncthreads();
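    // Slide the window: the halo accumulated past the previous tile becomes
    // the front of this one, and each thread folds its "order" contributions
    // in at offsets fid+n. The barrier inside the loop keeps threads whose
    // offsets collide from updating the same slot concurrently.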
if (fid<order) {
front[pid][fid]=front[pid][fid_halo];
front[pid][fid_halo]=(grdtyp)0.0;
} else
front[pid][fid]=(grdtyp)0.0;
for (int n=0; n<order; n++) {
front[pid][fid+n]+=ans[n][tid];
__syncthreads();
}
if (nx<npts_x && nz<npts_z)
brick[pt]=front[pid][fid];
pt+=PENCIL_SIZE;
nx+=PENCIL_SIZE;
}
}
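
// Interpolate the field brick back to the atom positions using the same
// B-spline weights, accumulating the per-atom force (charge times field,
// with qqrd2e folded into the scale factor) into ans.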
__kernel void interp(const __global numtyp4 *restrict x_,
const __global numtyp *restrict q_,
const int nlocal,
const __global grdtyp4 *restrict brick,
const __global grdtyp *restrict _rho_coeff,
const int npts_x, const int npts_yx, const grdtyp b_lo_x,
const grdtyp b_lo_y, const grdtyp b_lo_z,
const grdtyp delxinv, const grdtyp delyinv,
const grdtyp delzinv, const int order,
const int order2, const grdtyp qqrd2e_scale,
__global acctyp4 *restrict ans) {
__local grdtyp rho_coeff[PPPM_MAX_SPLINE*PPPM_MAX_SPLINE];
__local grdtyp rho1d_0[PPPM_MAX_SPLINE][PPPM_BLOCK_1D];
__local grdtyp rho1d_1[PPPM_MAX_SPLINE][PPPM_BLOCK_1D];
int tid=THREAD_ID_X;
if (tid<order2+order)
rho_coeff[tid]=_rho_coeff[tid];
__syncthreads();
int ii=tid+BLOCK_ID_X*BLOCK_SIZE_X;
int nx,ny,nz;
grdtyp tx,ty,tz;
if (ii<nlocal) {
numtyp4 p;
fetch4(p,ii,pos_tex);
grdtyp qs;
fetch(qs,ii,q_tex);
qs*=qqrd2e_scale;
acctyp4 ek;
ek.x=(acctyp)0.0;
ek.y=(acctyp)0.0;
ek.z=(acctyp)0.0;
if (qs!=(grdtyp)0.0) {
tx=(p.x-b_lo_x)*delxinv;
nx=tx;
ty=(p.y-b_lo_y)*delyinv;
ny=ty;
tz=(p.z-b_lo_z)*delzinv;
nz=tz;
grdtyp dx=nx+(grdtyp)0.5-tx;
grdtyp dy=ny+(grdtyp)0.5-ty;
grdtyp dz=nz+(grdtyp)0.5-tz;
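      // Compute the x and y spline weights for this atom once, by Horner's
      // rule; the z weights are evaluated inside the grid loop below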
for (int k=0; k<order; k++) {
rho1d_0[k][tid]=(grdtyp)0.0;
rho1d_1[k][tid]=(grdtyp)0.0;
for (int l=order2+k; l>=k; l-=order) {
rho1d_0[k][tid]=rho_coeff[l]+rho1d_0[k][tid]*dx;
rho1d_1[k][tid]=rho_coeff[l]+rho1d_1[k][tid]*dy;
}
}
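      // Walk the order^3 stencil around the atom; mz and my are flattened
      // brick indices (x fastest, then y, then z)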
int mz=fast_mul(nz,npts_yx)+nx;
for (int n=0; n<order; n++) {
grdtyp rho1d_2=(grdtyp)0.0;
for (int k=order2+n; k>=n; k-=order)
rho1d_2=rho_coeff[k]+rho1d_2*dz;
grdtyp z0=qs*rho1d_2;
int my=mz+fast_mul(ny,npts_x);
for (int m=0; m<order; m++) {
grdtyp y0=z0*rho1d_1[m][tid];
for (int l=0; l<order; l++) {
grdtyp x0=y0*rho1d_0[l][tid];
grdtyp4 el=brick[my+l];
ek.x-=x0*el.x;
ek.y-=x0*el.y;
ek.z-=x0*el.z;
}
my+=npts_x;
}
mz+=npts_yx;
}
}
ans[ii]=ek;
}
}