/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
 */
#include "gmxpre.h"

#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/gmxlib/nrnb.h"

#include "kernelutil_x86_avx_256_double.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_double
 * Electrostatics interaction: Coulomb
 * VdW interaction:            CubicSplineTable
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        PotentialAndForce
 */
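/*
 * This kernel processes four j atoms per iteration in 256-bit double-precision
 * AVX registers. The Coulomb interaction is evaluated analytically, while the
 * Lennard-Jones dispersion and repulsion terms are interpolated from a cubic
 * spline table. Padded neighbor-list entries (negative j indices) are handled
 * by a masked epilogue block inside the inner loop.
 */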
void
nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_double
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit     = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one         = _mm256_set1_pd(1.0);
    __m256d          two         = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }
    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Reset potential sums */
        velecsum         = _mm256_setzero_pd();
        vvdwsum          = _mm256_setzero_pd();

        /* Start inner kernel loop */
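        /* Only quadruplets in which all four j indices are real atoms are handled in this
         * loop; the final, possibly padded quadruplet is treated by the masked block below.
         */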
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = avx256_invsqrt_d(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);
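            /* Each table point stores eight doubles: Y,F,G,H for dispersion followed by
             * Y,F,G,H for repulsion, hence the shift by 3 (multiply by 8); the repulsion
             * sub-table is reached below by adding an offset of four elements.
             */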

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw6            = _mm256_mul_pd(c6_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);
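            /* vfeps is the fractional position within the table interval: VV = Y + eps*Fp
             * is the interpolated potential and FF = Fp + eps*(G + 2*Heps) is its
             * derivative with respect to the table coordinate.
             */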

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw12           = _mm256_mul_pd(c12_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
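            /* FF is a derivative with respect to the table coordinate, so the scalar force
             * (force divided by r) is -FF*tabscale/r; the sign is flipped with the XOR.
             */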

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum         = _mm256_add_pd(velecsum,velec);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = f+j_coord_offsetA;
            fjptrB           = f+j_coord_offsetB;
            fjptrC           = f+j_coord_offsetC;
            fjptrD           = f+j_coord_offsetD;
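            /* The same force is subtracted from the four j atoms (Newton's third law). */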
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 62 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0         = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
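            /* Expand the four 32-bit comparison results into 64-bit lanes by duplicating
             * each element, giving an all-ones or all-zeros mask per double.
             */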

            tmpmask1         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask       = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            jnrA             = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB             = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC             = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD             = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = avx256_invsqrt_d(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);
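            /* Clear r for the padded lanes so that they use table index zero in the
             * lookups below; their contributions are masked out again afterwards.
             */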

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw6            = _mm256_mul_pd(c6_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw12           = _mm256_mul_pd(c12_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);
            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB           = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC           = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD           = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
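            /* Padded lanes point at the local scratch buffer, so their force contributions
             * are written there and simply discarded.
             */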
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 63 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid             = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
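        /* ggid selects the energy group pair of this list, so the potentials are
         * accumulated per energy group pair.
         */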

        /* Increment number of inner iterations */
        inneriter        += j_index_end - j_index_start;

        /* Outer loop uses 9 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*63);
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_double
 * Electrostatics interaction: Coulomb
 * VdW interaction:            CubicSplineTable
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        Force
 */
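/*
 * This kernel is identical to the PotentialAndForce version above except that the
 * potential sums are not accumulated: only the table derivative FF is evaluated,
 * so the VV interpolation and the energy-group updates are omitted.
 */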
void
nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_double
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit     = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one         = _mm256_set1_pd(1.0);
    __m256d          two         = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }
    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = avx256_invsqrt_d(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = f+j_coord_offsetA;
            fjptrB           = f+j_coord_offsetB;
            fjptrC           = f+j_coord_offsetC;
            fjptrD           = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 53 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0         = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask       = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            jnrA             = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB             = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC             = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD             = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = avx256_invsqrt_d(rsq00);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB           = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC           = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD           = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 54 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter        += j_index_end - j_index_start;

        /* Outer loop uses 7 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*54);
}