1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
38 #include "gmxpre.h"
40 #include "config.h"
42 #include <math.h>
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_double.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_double
51 * Electrostatics interaction: ReactionField
52 * VdW interaction: CubicSplineTable
53 * Geometry: Water4-Water4
54 * Calculate force/pot: PotentialAndForce
56 void
57 nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_double
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
69 * jnr indices corresponding to data put in the four positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
75 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
76 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
77 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
78 real rcutoff_scalar;
79 real *shiftvec,*fshift,*x,*f;
80 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
81 real scratch[4*DIM];
82 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
83 real * vdwioffsetptr0;
84 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
85 real * vdwioffsetptr1;
86 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
87 real * vdwioffsetptr2;
88 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
89 real * vdwioffsetptr3;
90 __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
91 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
92 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
93 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
94 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
95 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
96 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
97 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
98 __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
99 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
100 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
101 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
102 __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
103 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
104 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
105 __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
106 __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
107 __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
108 __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
109 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
110 real *charge;
111 int nvdwtype;
112 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
113 int *vdwtype;
114 real *vdwparam;
115 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
116 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
117 __m128i vfitab;
118 __m128i ifour = _mm_set1_epi32(4);
119 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
120 real *vftab;
121 __m256d dummy_mask,cutoff_mask;
122 __m128 tmpmask0,tmpmask1;
123 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
124 __m256d one = _mm256_set1_pd(1.0);
125 __m256d two = _mm256_set1_pd(2.0);
126 x = xx[0];
127 f = ff[0];
129 nri = nlist->nri;
130 iinr = nlist->iinr;
131 jindex = nlist->jindex;
132 jjnr = nlist->jjnr;
133 shiftidx = nlist->shift;
134 gid = nlist->gid;
135 shiftvec = fr->shift_vec[0];
136 fshift = fr->fshift[0];
137 facel = _mm256_set1_pd(fr->ic->epsfac);
138 charge = mdatoms->chargeA;
139 krf = _mm256_set1_pd(fr->ic->k_rf);
140 krf2 = _mm256_set1_pd(fr->ic->k_rf*2.0);
141 crf = _mm256_set1_pd(fr->ic->c_rf);
142 nvdwtype = fr->ntype;
143 vdwparam = fr->nbfp;
144 vdwtype = mdatoms->typeA;
146 vftab = kernel_data->table_vdw->data;
147 vftabscale = _mm256_set1_pd(kernel_data->table_vdw->scale);
149 /* Setup water-specific parameters */
150 inr = nlist->iinr[0];
151 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
152 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
153 iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
154 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
156 jq1 = _mm256_set1_pd(charge[inr+1]);
157 jq2 = _mm256_set1_pd(charge[inr+2]);
158 jq3 = _mm256_set1_pd(charge[inr+3]);
159 vdwjidx0A = 2*vdwtype[inr+0];
160 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
161 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
162 qq11 = _mm256_mul_pd(iq1,jq1);
163 qq12 = _mm256_mul_pd(iq1,jq2);
164 qq13 = _mm256_mul_pd(iq1,jq3);
165 qq21 = _mm256_mul_pd(iq2,jq1);
166 qq22 = _mm256_mul_pd(iq2,jq2);
167 qq23 = _mm256_mul_pd(iq2,jq3);
168 qq31 = _mm256_mul_pd(iq3,jq1);
169 qq32 = _mm256_mul_pd(iq3,jq2);
170 qq33 = _mm256_mul_pd(iq3,jq3);
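/* Water4 geometry: atom 0 carries only the Lennard-Jones parameters while atoms 1-3
 * carry the charges (as in four-site water models such as TIP4P). The kernel therefore
 * evaluates VdW only for the 0-0 pair and reaction-field electrostatics for the nine
 * 1-3 x 1-3 charge pairs.
 */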
172 /* Avoid stupid compiler warnings */
173 jnrA = jnrB = jnrC = jnrD = 0;
174 j_coord_offsetA = 0;
175 j_coord_offsetB = 0;
176 j_coord_offsetC = 0;
177 j_coord_offsetD = 0;
179 outeriter = 0;
180 inneriter = 0;
182 for(iidx=0;iidx<4*DIM;iidx++)
184 scratch[iidx] = 0.0;
187 /* Start outer loop over neighborlists */
188 for(iidx=0; iidx<nri; iidx++)
190 /* Load shift vector for this list */
191 i_shift_offset = DIM*shiftidx[iidx];
193 /* Load limits for loop over neighbors */
194 j_index_start = jindex[iidx];
195 j_index_end = jindex[iidx+1];
197 /* Get outer coordinate index */
198 inr = iinr[iidx];
199 i_coord_offset = DIM*inr;
201 /* Load i particle coords and add shift vector */
202 gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
203 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
205 fix0 = _mm256_setzero_pd();
206 fiy0 = _mm256_setzero_pd();
207 fiz0 = _mm256_setzero_pd();
208 fix1 = _mm256_setzero_pd();
209 fiy1 = _mm256_setzero_pd();
210 fiz1 = _mm256_setzero_pd();
211 fix2 = _mm256_setzero_pd();
212 fiy2 = _mm256_setzero_pd();
213 fiz2 = _mm256_setzero_pd();
214 fix3 = _mm256_setzero_pd();
215 fiy3 = _mm256_setzero_pd();
216 fiz3 = _mm256_setzero_pd();
218 /* Reset potential sums */
219 velecsum = _mm256_setzero_pd();
220 vvdwsum = _mm256_setzero_pd();
222 /* Start inner kernel loop */
223 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
226 /* Get j neighbor index, and coordinate index */
227 jnrA = jjnr[jidx];
228 jnrB = jjnr[jidx+1];
229 jnrC = jjnr[jidx+2];
230 jnrD = jjnr[jidx+3];
231 j_coord_offsetA = DIM*jnrA;
232 j_coord_offsetB = DIM*jnrB;
233 j_coord_offsetC = DIM*jnrC;
234 j_coord_offsetD = DIM*jnrD;
236 /* load j atom coordinates */
237 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
238 x+j_coord_offsetC,x+j_coord_offsetD,
239 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
240 &jy2,&jz2,&jx3,&jy3,&jz3);
242 /* Calculate displacement vector */
243 dx00 = _mm256_sub_pd(ix0,jx0);
244 dy00 = _mm256_sub_pd(iy0,jy0);
245 dz00 = _mm256_sub_pd(iz0,jz0);
246 dx11 = _mm256_sub_pd(ix1,jx1);
247 dy11 = _mm256_sub_pd(iy1,jy1);
248 dz11 = _mm256_sub_pd(iz1,jz1);
249 dx12 = _mm256_sub_pd(ix1,jx2);
250 dy12 = _mm256_sub_pd(iy1,jy2);
251 dz12 = _mm256_sub_pd(iz1,jz2);
252 dx13 = _mm256_sub_pd(ix1,jx3);
253 dy13 = _mm256_sub_pd(iy1,jy3);
254 dz13 = _mm256_sub_pd(iz1,jz3);
255 dx21 = _mm256_sub_pd(ix2,jx1);
256 dy21 = _mm256_sub_pd(iy2,jy1);
257 dz21 = _mm256_sub_pd(iz2,jz1);
258 dx22 = _mm256_sub_pd(ix2,jx2);
259 dy22 = _mm256_sub_pd(iy2,jy2);
260 dz22 = _mm256_sub_pd(iz2,jz2);
261 dx23 = _mm256_sub_pd(ix2,jx3);
262 dy23 = _mm256_sub_pd(iy2,jy3);
263 dz23 = _mm256_sub_pd(iz2,jz3);
264 dx31 = _mm256_sub_pd(ix3,jx1);
265 dy31 = _mm256_sub_pd(iy3,jy1);
266 dz31 = _mm256_sub_pd(iz3,jz1);
267 dx32 = _mm256_sub_pd(ix3,jx2);
268 dy32 = _mm256_sub_pd(iy3,jy2);
269 dz32 = _mm256_sub_pd(iz3,jz2);
270 dx33 = _mm256_sub_pd(ix3,jx3);
271 dy33 = _mm256_sub_pd(iy3,jy3);
272 dz33 = _mm256_sub_pd(iz3,jz3);
274 /* Calculate squared distance and things based on it */
275 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
276 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
277 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
278 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
279 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
280 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
281 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
282 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
283 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
284 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
286 rinv00 = avx256_invsqrt_d(rsq00);
287 rinv11 = avx256_invsqrt_d(rsq11);
288 rinv12 = avx256_invsqrt_d(rsq12);
289 rinv13 = avx256_invsqrt_d(rsq13);
290 rinv21 = avx256_invsqrt_d(rsq21);
291 rinv22 = avx256_invsqrt_d(rsq22);
292 rinv23 = avx256_invsqrt_d(rsq23);
293 rinv31 = avx256_invsqrt_d(rsq31);
294 rinv32 = avx256_invsqrt_d(rsq32);
295 rinv33 = avx256_invsqrt_d(rsq33);
297 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
298 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
299 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
300 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
301 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
302 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
303 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
304 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
305 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
307 fjx0 = _mm256_setzero_pd();
308 fjy0 = _mm256_setzero_pd();
309 fjz0 = _mm256_setzero_pd();
310 fjx1 = _mm256_setzero_pd();
311 fjy1 = _mm256_setzero_pd();
312 fjz1 = _mm256_setzero_pd();
313 fjx2 = _mm256_setzero_pd();
314 fjy2 = _mm256_setzero_pd();
315 fjz2 = _mm256_setzero_pd();
316 fjx3 = _mm256_setzero_pd();
317 fjy3 = _mm256_setzero_pd();
318 fjz3 = _mm256_setzero_pd();
320 /**************************
321 * CALCULATE INTERACTIONS *
322 **************************/
324 r00 = _mm256_mul_pd(rsq00,rinv00);
326 /* Calculate table index by multiplying r with table scale and truncate to integer */
327 rt = _mm256_mul_pd(r00,vftabscale);
328 vfitab = _mm256_cvttpd_epi32(rt);
329 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
330 vfitab = _mm_slli_epi32(vfitab,3);
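/* Each table point stores four doubles (Y,F,G,H), and the dispersion and repulsion
 * tables are interleaved, so the stride per point is eight doubles. Shifting the
 * truncated index left by 3 multiplies it by 8; the repulsion entries are then
 * reached by adding 4 (ifour) below.
 */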
332 /* CUBIC SPLINE TABLE DISPERSION */
333 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
334 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
335 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
336 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
337 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
338 Heps = _mm256_mul_pd(vfeps,H);
339 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
340 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
341 vvdw6 = _mm256_mul_pd(c6_00,VV);
342 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
343 fvdw6 = _mm256_mul_pd(c6_00,FF);
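/* Cubic spline evaluation with eps the fractional position inside the table interval:
 *   V(eps)  = Y + eps*(F + eps*(G + eps*H)) = Y + eps*Fp
 *   dV/deps = Fp + eps*(G + 2*Heps)
 * VV is the tabulated dispersion potential and FF its derivative with respect to the
 * table coordinate; both are scaled by the pair c6 parameter.
 */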
345 /* CUBIC SPLINE TABLE REPULSION */
346 vfitab = _mm_add_epi32(vfitab,ifour);
347 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
348 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
349 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
350 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
351 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
352 Heps = _mm256_mul_pd(vfeps,H);
353 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
354 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
355 vvdw12 = _mm256_mul_pd(c12_00,VV);
356 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
357 fvdw12 = _mm256_mul_pd(c12_00,FF);
358 vvdw = _mm256_add_pd(vvdw12,vvdw6);
359 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
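/* Convert the table derivatives to the scalar force used with the displacement vector:
 * F/r = -(c6*FF_disp + c12*FF_rep)*tabscale/r. The xor with the sign-bit constant
 * negates the product without an extra multiplication.
 */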
361 /* Update potential sum for this i atom from the interaction with this j atom. */
362 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
364 fscal = fvdw;
366 /* Calculate temporary vectorial force */
367 tx = _mm256_mul_pd(fscal,dx00);
368 ty = _mm256_mul_pd(fscal,dy00);
369 tz = _mm256_mul_pd(fscal,dz00);
371 /* Update vectorial force */
372 fix0 = _mm256_add_pd(fix0,tx);
373 fiy0 = _mm256_add_pd(fiy0,ty);
374 fiz0 = _mm256_add_pd(fiz0,tz);
376 fjx0 = _mm256_add_pd(fjx0,tx);
377 fjy0 = _mm256_add_pd(fjy0,ty);
378 fjz0 = _mm256_add_pd(fjz0,tz);
380 /**************************
381 * CALCULATE INTERACTIONS *
382 **************************/
384 /* REACTION-FIELD ELECTROSTATICS */
385 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
386 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
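/* Reaction-field pair interaction (charges already include the epsfac prefactor):
 *   V(r)   = q_i*q_j*(1/r + k_rf*r^2 - c_rf)
 *   F(r)/r = q_i*q_j*(1/r^3 - 2*k_rf)
 * velec is the potential contribution and felec the scalar force divided by r.
 */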
388 /* Update potential sum for this i atom from the interaction with this j atom. */
389 velecsum = _mm256_add_pd(velecsum,velec);
391 fscal = felec;
393 /* Calculate temporary vectorial force */
394 tx = _mm256_mul_pd(fscal,dx11);
395 ty = _mm256_mul_pd(fscal,dy11);
396 tz = _mm256_mul_pd(fscal,dz11);
398 /* Update vectorial force */
399 fix1 = _mm256_add_pd(fix1,tx);
400 fiy1 = _mm256_add_pd(fiy1,ty);
401 fiz1 = _mm256_add_pd(fiz1,tz);
403 fjx1 = _mm256_add_pd(fjx1,tx);
404 fjy1 = _mm256_add_pd(fjy1,ty);
405 fjz1 = _mm256_add_pd(fjz1,tz);
407 /**************************
408 * CALCULATE INTERACTIONS *
409 **************************/
411 /* REACTION-FIELD ELECTROSTATICS */
412 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
413 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
415 /* Update potential sum for this i atom from the interaction with this j atom. */
416 velecsum = _mm256_add_pd(velecsum,velec);
418 fscal = felec;
420 /* Calculate temporary vectorial force */
421 tx = _mm256_mul_pd(fscal,dx12);
422 ty = _mm256_mul_pd(fscal,dy12);
423 tz = _mm256_mul_pd(fscal,dz12);
425 /* Update vectorial force */
426 fix1 = _mm256_add_pd(fix1,tx);
427 fiy1 = _mm256_add_pd(fiy1,ty);
428 fiz1 = _mm256_add_pd(fiz1,tz);
430 fjx2 = _mm256_add_pd(fjx2,tx);
431 fjy2 = _mm256_add_pd(fjy2,ty);
432 fjz2 = _mm256_add_pd(fjz2,tz);
434 /**************************
435 * CALCULATE INTERACTIONS *
436 **************************/
438 /* REACTION-FIELD ELECTROSTATICS */
439 velec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
440 felec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
442 /* Update potential sum for this i atom from the interaction with this j atom. */
443 velecsum = _mm256_add_pd(velecsum,velec);
445 fscal = felec;
447 /* Calculate temporary vectorial force */
448 tx = _mm256_mul_pd(fscal,dx13);
449 ty = _mm256_mul_pd(fscal,dy13);
450 tz = _mm256_mul_pd(fscal,dz13);
452 /* Update vectorial force */
453 fix1 = _mm256_add_pd(fix1,tx);
454 fiy1 = _mm256_add_pd(fiy1,ty);
455 fiz1 = _mm256_add_pd(fiz1,tz);
457 fjx3 = _mm256_add_pd(fjx3,tx);
458 fjy3 = _mm256_add_pd(fjy3,ty);
459 fjz3 = _mm256_add_pd(fjz3,tz);
461 /**************************
462 * CALCULATE INTERACTIONS *
463 **************************/
465 /* REACTION-FIELD ELECTROSTATICS */
466 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
467 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
469 /* Update potential sum for this i atom from the interaction with this j atom. */
470 velecsum = _mm256_add_pd(velecsum,velec);
472 fscal = felec;
474 /* Calculate temporary vectorial force */
475 tx = _mm256_mul_pd(fscal,dx21);
476 ty = _mm256_mul_pd(fscal,dy21);
477 tz = _mm256_mul_pd(fscal,dz21);
479 /* Update vectorial force */
480 fix2 = _mm256_add_pd(fix2,tx);
481 fiy2 = _mm256_add_pd(fiy2,ty);
482 fiz2 = _mm256_add_pd(fiz2,tz);
484 fjx1 = _mm256_add_pd(fjx1,tx);
485 fjy1 = _mm256_add_pd(fjy1,ty);
486 fjz1 = _mm256_add_pd(fjz1,tz);
488 /**************************
489 * CALCULATE INTERACTIONS *
490 **************************/
492 /* REACTION-FIELD ELECTROSTATICS */
493 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
494 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
496 /* Update potential sum for this i atom from the interaction with this j atom. */
497 velecsum = _mm256_add_pd(velecsum,velec);
499 fscal = felec;
501 /* Calculate temporary vectorial force */
502 tx = _mm256_mul_pd(fscal,dx22);
503 ty = _mm256_mul_pd(fscal,dy22);
504 tz = _mm256_mul_pd(fscal,dz22);
506 /* Update vectorial force */
507 fix2 = _mm256_add_pd(fix2,tx);
508 fiy2 = _mm256_add_pd(fiy2,ty);
509 fiz2 = _mm256_add_pd(fiz2,tz);
511 fjx2 = _mm256_add_pd(fjx2,tx);
512 fjy2 = _mm256_add_pd(fjy2,ty);
513 fjz2 = _mm256_add_pd(fjz2,tz);
515 /**************************
516 * CALCULATE INTERACTIONS *
517 **************************/
519 /* REACTION-FIELD ELECTROSTATICS */
520 velec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
521 felec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
523 /* Update potential sum for this i atom from the interaction with this j atom. */
524 velecsum = _mm256_add_pd(velecsum,velec);
526 fscal = felec;
528 /* Calculate temporary vectorial force */
529 tx = _mm256_mul_pd(fscal,dx23);
530 ty = _mm256_mul_pd(fscal,dy23);
531 tz = _mm256_mul_pd(fscal,dz23);
533 /* Update vectorial force */
534 fix2 = _mm256_add_pd(fix2,tx);
535 fiy2 = _mm256_add_pd(fiy2,ty);
536 fiz2 = _mm256_add_pd(fiz2,tz);
538 fjx3 = _mm256_add_pd(fjx3,tx);
539 fjy3 = _mm256_add_pd(fjy3,ty);
540 fjz3 = _mm256_add_pd(fjz3,tz);
542 /**************************
543 * CALCULATE INTERACTIONS *
544 **************************/
546 /* REACTION-FIELD ELECTROSTATICS */
547 velec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
548 felec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
550 /* Update potential sum for this i atom from the interaction with this j atom. */
551 velecsum = _mm256_add_pd(velecsum,velec);
553 fscal = felec;
555 /* Calculate temporary vectorial force */
556 tx = _mm256_mul_pd(fscal,dx31);
557 ty = _mm256_mul_pd(fscal,dy31);
558 tz = _mm256_mul_pd(fscal,dz31);
560 /* Update vectorial force */
561 fix3 = _mm256_add_pd(fix3,tx);
562 fiy3 = _mm256_add_pd(fiy3,ty);
563 fiz3 = _mm256_add_pd(fiz3,tz);
565 fjx1 = _mm256_add_pd(fjx1,tx);
566 fjy1 = _mm256_add_pd(fjy1,ty);
567 fjz1 = _mm256_add_pd(fjz1,tz);
569 /**************************
570 * CALCULATE INTERACTIONS *
571 **************************/
573 /* REACTION-FIELD ELECTROSTATICS */
574 velec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
575 felec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
577 /* Update potential sum for this i atom from the interaction with this j atom. */
578 velecsum = _mm256_add_pd(velecsum,velec);
580 fscal = felec;
582 /* Calculate temporary vectorial force */
583 tx = _mm256_mul_pd(fscal,dx32);
584 ty = _mm256_mul_pd(fscal,dy32);
585 tz = _mm256_mul_pd(fscal,dz32);
587 /* Update vectorial force */
588 fix3 = _mm256_add_pd(fix3,tx);
589 fiy3 = _mm256_add_pd(fiy3,ty);
590 fiz3 = _mm256_add_pd(fiz3,tz);
592 fjx2 = _mm256_add_pd(fjx2,tx);
593 fjy2 = _mm256_add_pd(fjy2,ty);
594 fjz2 = _mm256_add_pd(fjz2,tz);
596 /**************************
597 * CALCULATE INTERACTIONS *
598 **************************/
600 /* REACTION-FIELD ELECTROSTATICS */
601 velec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
602 felec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
604 /* Update potential sum for this i atom from the interaction with this j atom. */
605 velecsum = _mm256_add_pd(velecsum,velec);
607 fscal = felec;
609 /* Calculate temporary vectorial force */
610 tx = _mm256_mul_pd(fscal,dx33);
611 ty = _mm256_mul_pd(fscal,dy33);
612 tz = _mm256_mul_pd(fscal,dz33);
614 /* Update vectorial force */
615 fix3 = _mm256_add_pd(fix3,tx);
616 fiy3 = _mm256_add_pd(fiy3,ty);
617 fiz3 = _mm256_add_pd(fiz3,tz);
619 fjx3 = _mm256_add_pd(fjx3,tx);
620 fjy3 = _mm256_add_pd(fjy3,ty);
621 fjz3 = _mm256_add_pd(fjz3,tz);
623 fjptrA = f+j_coord_offsetA;
624 fjptrB = f+j_coord_offsetB;
625 fjptrC = f+j_coord_offsetC;
626 fjptrD = f+j_coord_offsetD;
628 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
629 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
630 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
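/* Newton's third law: the accumulated j forces are transposed back to per-atom layout
 * and subtracted from the force arrays of the four j waters.
 */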
632 /* Inner loop uses 347 flops */
635 if(jidx<j_index_end)
638 /* Get j neighbor index, and coordinate index */
639 jnrlistA = jjnr[jidx];
640 jnrlistB = jjnr[jidx+1];
641 jnrlistC = jjnr[jidx+2];
642 jnrlistD = jjnr[jidx+3];
643 /* Sign of each element will be negative for non-real atoms.
644 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
645 * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
647 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
649 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
650 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
651 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
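/* The four 32-bit comparison results are each duplicated to 64 bits by the permutes
 * and combined into a 256-bit mask: all bits set in lanes that correspond to padding
 * (negative) j indices, zero in lanes holding real atoms.
 */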
653 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
654 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
655 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
656 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
657 j_coord_offsetA = DIM*jnrA;
658 j_coord_offsetB = DIM*jnrB;
659 j_coord_offsetC = DIM*jnrC;
660 j_coord_offsetD = DIM*jnrD;
662 /* load j atom coordinates */
663 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
664 x+j_coord_offsetC,x+j_coord_offsetD,
665 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
666 &jy2,&jz2,&jx3,&jy3,&jz3);
668 /* Calculate displacement vector */
669 dx00 = _mm256_sub_pd(ix0,jx0);
670 dy00 = _mm256_sub_pd(iy0,jy0);
671 dz00 = _mm256_sub_pd(iz0,jz0);
672 dx11 = _mm256_sub_pd(ix1,jx1);
673 dy11 = _mm256_sub_pd(iy1,jy1);
674 dz11 = _mm256_sub_pd(iz1,jz1);
675 dx12 = _mm256_sub_pd(ix1,jx2);
676 dy12 = _mm256_sub_pd(iy1,jy2);
677 dz12 = _mm256_sub_pd(iz1,jz2);
678 dx13 = _mm256_sub_pd(ix1,jx3);
679 dy13 = _mm256_sub_pd(iy1,jy3);
680 dz13 = _mm256_sub_pd(iz1,jz3);
681 dx21 = _mm256_sub_pd(ix2,jx1);
682 dy21 = _mm256_sub_pd(iy2,jy1);
683 dz21 = _mm256_sub_pd(iz2,jz1);
684 dx22 = _mm256_sub_pd(ix2,jx2);
685 dy22 = _mm256_sub_pd(iy2,jy2);
686 dz22 = _mm256_sub_pd(iz2,jz2);
687 dx23 = _mm256_sub_pd(ix2,jx3);
688 dy23 = _mm256_sub_pd(iy2,jy3);
689 dz23 = _mm256_sub_pd(iz2,jz3);
690 dx31 = _mm256_sub_pd(ix3,jx1);
691 dy31 = _mm256_sub_pd(iy3,jy1);
692 dz31 = _mm256_sub_pd(iz3,jz1);
693 dx32 = _mm256_sub_pd(ix3,jx2);
694 dy32 = _mm256_sub_pd(iy3,jy2);
695 dz32 = _mm256_sub_pd(iz3,jz2);
696 dx33 = _mm256_sub_pd(ix3,jx3);
697 dy33 = _mm256_sub_pd(iy3,jy3);
698 dz33 = _mm256_sub_pd(iz3,jz3);
700 /* Calculate squared distance and things based on it */
701 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
702 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
703 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
704 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
705 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
706 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
707 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
708 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
709 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
710 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
712 rinv00 = avx256_invsqrt_d(rsq00);
713 rinv11 = avx256_invsqrt_d(rsq11);
714 rinv12 = avx256_invsqrt_d(rsq12);
715 rinv13 = avx256_invsqrt_d(rsq13);
716 rinv21 = avx256_invsqrt_d(rsq21);
717 rinv22 = avx256_invsqrt_d(rsq22);
718 rinv23 = avx256_invsqrt_d(rsq23);
719 rinv31 = avx256_invsqrt_d(rsq31);
720 rinv32 = avx256_invsqrt_d(rsq32);
721 rinv33 = avx256_invsqrt_d(rsq33);
723 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
724 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
725 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
726 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
727 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
728 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
729 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
730 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
731 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
733 fjx0 = _mm256_setzero_pd();
734 fjy0 = _mm256_setzero_pd();
735 fjz0 = _mm256_setzero_pd();
736 fjx1 = _mm256_setzero_pd();
737 fjy1 = _mm256_setzero_pd();
738 fjz1 = _mm256_setzero_pd();
739 fjx2 = _mm256_setzero_pd();
740 fjy2 = _mm256_setzero_pd();
741 fjz2 = _mm256_setzero_pd();
742 fjx3 = _mm256_setzero_pd();
743 fjy3 = _mm256_setzero_pd();
744 fjz3 = _mm256_setzero_pd();
746 /**************************
747 * CALCULATE INTERACTIONS *
748 **************************/
750 r00 = _mm256_mul_pd(rsq00,rinv00);
751 r00 = _mm256_andnot_pd(dummy_mask,r00);
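/* Zero r for the padding lanes so the table index computed below stays at a valid
 * position instead of being derived from a meaningless distance.
 */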
753 /* Calculate table index by multiplying r with table scale and truncate to integer */
754 rt = _mm256_mul_pd(r00,vftabscale);
755 vfitab = _mm256_cvttpd_epi32(rt);
756 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
757 vfitab = _mm_slli_epi32(vfitab,3);
759 /* CUBIC SPLINE TABLE DISPERSION */
760 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
761 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
762 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
763 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
764 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
765 Heps = _mm256_mul_pd(vfeps,H);
766 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
767 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
768 vvdw6 = _mm256_mul_pd(c6_00,VV);
769 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
770 fvdw6 = _mm256_mul_pd(c6_00,FF);
772 /* CUBIC SPLINE TABLE REPULSION */
773 vfitab = _mm_add_epi32(vfitab,ifour);
774 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
775 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
776 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
777 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
778 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
779 Heps = _mm256_mul_pd(vfeps,H);
780 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
781 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
782 vvdw12 = _mm256_mul_pd(c12_00,VV);
783 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
784 fvdw12 = _mm256_mul_pd(c12_00,FF);
785 vvdw = _mm256_add_pd(vvdw12,vvdw6);
786 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
788 /* Update potential sum for this i atom from the interaction with this j atom. */
789 vvdw = _mm256_andnot_pd(dummy_mask,vvdw);
790 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
792 fscal = fvdw;
794 fscal = _mm256_andnot_pd(dummy_mask,fscal);
796 /* Calculate temporary vectorial force */
797 tx = _mm256_mul_pd(fscal,dx00);
798 ty = _mm256_mul_pd(fscal,dy00);
799 tz = _mm256_mul_pd(fscal,dz00);
801 /* Update vectorial force */
802 fix0 = _mm256_add_pd(fix0,tx);
803 fiy0 = _mm256_add_pd(fiy0,ty);
804 fiz0 = _mm256_add_pd(fiz0,tz);
806 fjx0 = _mm256_add_pd(fjx0,tx);
807 fjy0 = _mm256_add_pd(fjy0,ty);
808 fjz0 = _mm256_add_pd(fjz0,tz);
810 /**************************
811 * CALCULATE INTERACTIONS *
812 **************************/
814 /* REACTION-FIELD ELECTROSTATICS */
815 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
816 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
818 /* Update potential sum for this i atom from the interaction with this j atom. */
819 velec = _mm256_andnot_pd(dummy_mask,velec);
820 velecsum = _mm256_add_pd(velecsum,velec);
822 fscal = felec;
824 fscal = _mm256_andnot_pd(dummy_mask,fscal);
826 /* Calculate temporary vectorial force */
827 tx = _mm256_mul_pd(fscal,dx11);
828 ty = _mm256_mul_pd(fscal,dy11);
829 tz = _mm256_mul_pd(fscal,dz11);
831 /* Update vectorial force */
832 fix1 = _mm256_add_pd(fix1,tx);
833 fiy1 = _mm256_add_pd(fiy1,ty);
834 fiz1 = _mm256_add_pd(fiz1,tz);
836 fjx1 = _mm256_add_pd(fjx1,tx);
837 fjy1 = _mm256_add_pd(fjy1,ty);
838 fjz1 = _mm256_add_pd(fjz1,tz);
840 /**************************
841 * CALCULATE INTERACTIONS *
842 **************************/
844 /* REACTION-FIELD ELECTROSTATICS */
845 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
846 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
848 /* Update potential sum for this i atom from the interaction with this j atom. */
849 velec = _mm256_andnot_pd(dummy_mask,velec);
850 velecsum = _mm256_add_pd(velecsum,velec);
852 fscal = felec;
854 fscal = _mm256_andnot_pd(dummy_mask,fscal);
856 /* Calculate temporary vectorial force */
857 tx = _mm256_mul_pd(fscal,dx12);
858 ty = _mm256_mul_pd(fscal,dy12);
859 tz = _mm256_mul_pd(fscal,dz12);
861 /* Update vectorial force */
862 fix1 = _mm256_add_pd(fix1,tx);
863 fiy1 = _mm256_add_pd(fiy1,ty);
864 fiz1 = _mm256_add_pd(fiz1,tz);
866 fjx2 = _mm256_add_pd(fjx2,tx);
867 fjy2 = _mm256_add_pd(fjy2,ty);
868 fjz2 = _mm256_add_pd(fjz2,tz);
870 /**************************
871 * CALCULATE INTERACTIONS *
872 **************************/
874 /* REACTION-FIELD ELECTROSTATICS */
875 velec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_add_pd(rinv13,_mm256_mul_pd(krf,rsq13)),crf));
876 felec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
878 /* Update potential sum for this i atom from the interaction with this j atom. */
879 velec = _mm256_andnot_pd(dummy_mask,velec);
880 velecsum = _mm256_add_pd(velecsum,velec);
882 fscal = felec;
884 fscal = _mm256_andnot_pd(dummy_mask,fscal);
886 /* Calculate temporary vectorial force */
887 tx = _mm256_mul_pd(fscal,dx13);
888 ty = _mm256_mul_pd(fscal,dy13);
889 tz = _mm256_mul_pd(fscal,dz13);
891 /* Update vectorial force */
892 fix1 = _mm256_add_pd(fix1,tx);
893 fiy1 = _mm256_add_pd(fiy1,ty);
894 fiz1 = _mm256_add_pd(fiz1,tz);
896 fjx3 = _mm256_add_pd(fjx3,tx);
897 fjy3 = _mm256_add_pd(fjy3,ty);
898 fjz3 = _mm256_add_pd(fjz3,tz);
900 /**************************
901 * CALCULATE INTERACTIONS *
902 **************************/
904 /* REACTION-FIELD ELECTROSTATICS */
905 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
906 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
908 /* Update potential sum for this i atom from the interaction with this j atom. */
909 velec = _mm256_andnot_pd(dummy_mask,velec);
910 velecsum = _mm256_add_pd(velecsum,velec);
912 fscal = felec;
914 fscal = _mm256_andnot_pd(dummy_mask,fscal);
916 /* Calculate temporary vectorial force */
917 tx = _mm256_mul_pd(fscal,dx21);
918 ty = _mm256_mul_pd(fscal,dy21);
919 tz = _mm256_mul_pd(fscal,dz21);
921 /* Update vectorial force */
922 fix2 = _mm256_add_pd(fix2,tx);
923 fiy2 = _mm256_add_pd(fiy2,ty);
924 fiz2 = _mm256_add_pd(fiz2,tz);
926 fjx1 = _mm256_add_pd(fjx1,tx);
927 fjy1 = _mm256_add_pd(fjy1,ty);
928 fjz1 = _mm256_add_pd(fjz1,tz);
930 /**************************
931 * CALCULATE INTERACTIONS *
932 **************************/
934 /* REACTION-FIELD ELECTROSTATICS */
935 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
936 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
938 /* Update potential sum for this i atom from the interaction with this j atom. */
939 velec = _mm256_andnot_pd(dummy_mask,velec);
940 velecsum = _mm256_add_pd(velecsum,velec);
942 fscal = felec;
944 fscal = _mm256_andnot_pd(dummy_mask,fscal);
946 /* Calculate temporary vectorial force */
947 tx = _mm256_mul_pd(fscal,dx22);
948 ty = _mm256_mul_pd(fscal,dy22);
949 tz = _mm256_mul_pd(fscal,dz22);
951 /* Update vectorial force */
952 fix2 = _mm256_add_pd(fix2,tx);
953 fiy2 = _mm256_add_pd(fiy2,ty);
954 fiz2 = _mm256_add_pd(fiz2,tz);
956 fjx2 = _mm256_add_pd(fjx2,tx);
957 fjy2 = _mm256_add_pd(fjy2,ty);
958 fjz2 = _mm256_add_pd(fjz2,tz);
960 /**************************
961 * CALCULATE INTERACTIONS *
962 **************************/
964 /* REACTION-FIELD ELECTROSTATICS */
965 velec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_add_pd(rinv23,_mm256_mul_pd(krf,rsq23)),crf));
966 felec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
968 /* Update potential sum for this i atom from the interaction with this j atom. */
969 velec = _mm256_andnot_pd(dummy_mask,velec);
970 velecsum = _mm256_add_pd(velecsum,velec);
972 fscal = felec;
974 fscal = _mm256_andnot_pd(dummy_mask,fscal);
976 /* Calculate temporary vectorial force */
977 tx = _mm256_mul_pd(fscal,dx23);
978 ty = _mm256_mul_pd(fscal,dy23);
979 tz = _mm256_mul_pd(fscal,dz23);
981 /* Update vectorial force */
982 fix2 = _mm256_add_pd(fix2,tx);
983 fiy2 = _mm256_add_pd(fiy2,ty);
984 fiz2 = _mm256_add_pd(fiz2,tz);
986 fjx3 = _mm256_add_pd(fjx3,tx);
987 fjy3 = _mm256_add_pd(fjy3,ty);
988 fjz3 = _mm256_add_pd(fjz3,tz);
990 /**************************
991 * CALCULATE INTERACTIONS *
992 **************************/
994 /* REACTION-FIELD ELECTROSTATICS */
995 velec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_add_pd(rinv31,_mm256_mul_pd(krf,rsq31)),crf));
996 felec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
998 /* Update potential sum for this i atom from the interaction with this j atom. */
999 velec = _mm256_andnot_pd(dummy_mask,velec);
1000 velecsum = _mm256_add_pd(velecsum,velec);
1002 fscal = felec;
1004 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1006 /* Calculate temporary vectorial force */
1007 tx = _mm256_mul_pd(fscal,dx31);
1008 ty = _mm256_mul_pd(fscal,dy31);
1009 tz = _mm256_mul_pd(fscal,dz31);
1011 /* Update vectorial force */
1012 fix3 = _mm256_add_pd(fix3,tx);
1013 fiy3 = _mm256_add_pd(fiy3,ty);
1014 fiz3 = _mm256_add_pd(fiz3,tz);
1016 fjx1 = _mm256_add_pd(fjx1,tx);
1017 fjy1 = _mm256_add_pd(fjy1,ty);
1018 fjz1 = _mm256_add_pd(fjz1,tz);
1020 /**************************
1021 * CALCULATE INTERACTIONS *
1022 **************************/
1024 /* REACTION-FIELD ELECTROSTATICS */
1025 velec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_add_pd(rinv32,_mm256_mul_pd(krf,rsq32)),crf));
1026 felec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
1028 /* Update potential sum for this i atom from the interaction with this j atom. */
1029 velec = _mm256_andnot_pd(dummy_mask,velec);
1030 velecsum = _mm256_add_pd(velecsum,velec);
1032 fscal = felec;
1034 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1036 /* Calculate temporary vectorial force */
1037 tx = _mm256_mul_pd(fscal,dx32);
1038 ty = _mm256_mul_pd(fscal,dy32);
1039 tz = _mm256_mul_pd(fscal,dz32);
1041 /* Update vectorial force */
1042 fix3 = _mm256_add_pd(fix3,tx);
1043 fiy3 = _mm256_add_pd(fiy3,ty);
1044 fiz3 = _mm256_add_pd(fiz3,tz);
1046 fjx2 = _mm256_add_pd(fjx2,tx);
1047 fjy2 = _mm256_add_pd(fjy2,ty);
1048 fjz2 = _mm256_add_pd(fjz2,tz);
1050 /**************************
1051 * CALCULATE INTERACTIONS *
1052 **************************/
1054 /* REACTION-FIELD ELECTROSTATICS */
1055 velec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_add_pd(rinv33,_mm256_mul_pd(krf,rsq33)),crf));
1056 felec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
1058 /* Update potential sum for this i atom from the interaction with this j atom. */
1059 velec = _mm256_andnot_pd(dummy_mask,velec);
1060 velecsum = _mm256_add_pd(velecsum,velec);
1062 fscal = felec;
1064 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1066 /* Calculate temporary vectorial force */
1067 tx = _mm256_mul_pd(fscal,dx33);
1068 ty = _mm256_mul_pd(fscal,dy33);
1069 tz = _mm256_mul_pd(fscal,dz33);
1071 /* Update vectorial force */
1072 fix3 = _mm256_add_pd(fix3,tx);
1073 fiy3 = _mm256_add_pd(fiy3,ty);
1074 fiz3 = _mm256_add_pd(fiz3,tz);
1076 fjx3 = _mm256_add_pd(fjx3,tx);
1077 fjy3 = _mm256_add_pd(fjy3,ty);
1078 fjz3 = _mm256_add_pd(fjz3,tz);
1080 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1081 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1082 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1083 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
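/* Padding lanes write their (already masked-out) j forces to the local scratch buffer
 * instead of the real force array.
 */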
1085 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1086 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1087 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1089 /* Inner loop uses 348 flops */
1092 /* End of innermost loop */
1094 gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1095 f+i_coord_offset,fshift+i_shift_offset);
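/* The i-atom forces are reduced over the four SIMD lanes, added to the force array,
 * and accumulated into fshift for the shift-force (virial) bookkeeping.
 */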
1097 ggid = gid[iidx];
1098 /* Update potential energies */
1099 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1100 gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
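/* Each potential accumulator is summed over its four lanes and added to the
 * energy-group bin selected by ggid.
 */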
1102 /* Increment number of inner iterations */
1103 inneriter += j_index_end - j_index_start;
1105 /* Outer loop uses 26 flops */
1108 /* Increment number of outer iterations */
1109 outeriter += nri;
1111 /* Update outer/inner flops */
1113 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*348);
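/* Record the operation count for the performance summary: 26 flops per outer
 * iteration plus 348 per inner iteration.
 */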
1116 * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_double
1117 * Electrostatics interaction: ReactionField
1118 * VdW interaction: CubicSplineTable
1119 * Geometry: Water4-Water4
1120 * Calculate force/pot: Force
1122 void
1123 nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_double
1124 (t_nblist * gmx_restrict nlist,
1125 rvec * gmx_restrict xx,
1126 rvec * gmx_restrict ff,
1127 struct t_forcerec * gmx_restrict fr,
1128 t_mdatoms * gmx_restrict mdatoms,
1129 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1130 t_nrnb * gmx_restrict nrnb)
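/* Force-only variant: identical setup and loop structure to the VF kernel above, but
 * the potential terms (VV, velecsum, vvdwsum) are not evaluated, which lowers the
 * inner-loop cost to 294 flops.
 */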
1132 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1133 * just 0 for non-waters.
1134 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1135 * jnr indices corresponding to data put in the four positions in the SIMD register.
1137 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1138 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1139 int jnrA,jnrB,jnrC,jnrD;
1140 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1141 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1142 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1143 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1144 real rcutoff_scalar;
1145 real *shiftvec,*fshift,*x,*f;
1146 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1147 real scratch[4*DIM];
1148 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1149 real * vdwioffsetptr0;
1150 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1151 real * vdwioffsetptr1;
1152 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1153 real * vdwioffsetptr2;
1154 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1155 real * vdwioffsetptr3;
1156 __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1157 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1158 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1159 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1160 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1161 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1162 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1163 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
1164 __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1165 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1166 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1167 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1168 __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1169 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1170 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1171 __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1172 __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1173 __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1174 __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1175 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1176 real *charge;
1177 int nvdwtype;
1178 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1179 int *vdwtype;
1180 real *vdwparam;
1181 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
1182 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
1183 __m128i vfitab;
1184 __m128i ifour = _mm_set1_epi32(4);
1185 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1186 real *vftab;
1187 __m256d dummy_mask,cutoff_mask;
1188 __m128 tmpmask0,tmpmask1;
1189 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1190 __m256d one = _mm256_set1_pd(1.0);
1191 __m256d two = _mm256_set1_pd(2.0);
1192 x = xx[0];
1193 f = ff[0];
1195 nri = nlist->nri;
1196 iinr = nlist->iinr;
1197 jindex = nlist->jindex;
1198 jjnr = nlist->jjnr;
1199 shiftidx = nlist->shift;
1200 gid = nlist->gid;
1201 shiftvec = fr->shift_vec[0];
1202 fshift = fr->fshift[0];
1203 facel = _mm256_set1_pd(fr->ic->epsfac);
1204 charge = mdatoms->chargeA;
1205 krf = _mm256_set1_pd(fr->ic->k_rf);
1206 krf2 = _mm256_set1_pd(fr->ic->k_rf*2.0);
1207 crf = _mm256_set1_pd(fr->ic->c_rf);
1208 nvdwtype = fr->ntype;
1209 vdwparam = fr->nbfp;
1210 vdwtype = mdatoms->typeA;
1212 vftab = kernel_data->table_vdw->data;
1213 vftabscale = _mm256_set1_pd(kernel_data->table_vdw->scale);
1215 /* Setup water-specific parameters */
1216 inr = nlist->iinr[0];
1217 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1218 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1219 iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1220 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1222 jq1 = _mm256_set1_pd(charge[inr+1]);
1223 jq2 = _mm256_set1_pd(charge[inr+2]);
1224 jq3 = _mm256_set1_pd(charge[inr+3]);
1225 vdwjidx0A = 2*vdwtype[inr+0];
1226 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1227 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1228 qq11 = _mm256_mul_pd(iq1,jq1);
1229 qq12 = _mm256_mul_pd(iq1,jq2);
1230 qq13 = _mm256_mul_pd(iq1,jq3);
1231 qq21 = _mm256_mul_pd(iq2,jq1);
1232 qq22 = _mm256_mul_pd(iq2,jq2);
1233 qq23 = _mm256_mul_pd(iq2,jq3);
1234 qq31 = _mm256_mul_pd(iq3,jq1);
1235 qq32 = _mm256_mul_pd(iq3,jq2);
1236 qq33 = _mm256_mul_pd(iq3,jq3);
1238 /* Avoid stupid compiler warnings */
1239 jnrA = jnrB = jnrC = jnrD = 0;
1240 j_coord_offsetA = 0;
1241 j_coord_offsetB = 0;
1242 j_coord_offsetC = 0;
1243 j_coord_offsetD = 0;
1245 outeriter = 0;
1246 inneriter = 0;
1248 for(iidx=0;iidx<4*DIM;iidx++)
1250 scratch[iidx] = 0.0;
1253 /* Start outer loop over neighborlists */
1254 for(iidx=0; iidx<nri; iidx++)
1256 /* Load shift vector for this list */
1257 i_shift_offset = DIM*shiftidx[iidx];
1259 /* Load limits for loop over neighbors */
1260 j_index_start = jindex[iidx];
1261 j_index_end = jindex[iidx+1];
1263 /* Get outer coordinate index */
1264 inr = iinr[iidx];
1265 i_coord_offset = DIM*inr;
1267 /* Load i particle coords and add shift vector */
1268 gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1269 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1271 fix0 = _mm256_setzero_pd();
1272 fiy0 = _mm256_setzero_pd();
1273 fiz0 = _mm256_setzero_pd();
1274 fix1 = _mm256_setzero_pd();
1275 fiy1 = _mm256_setzero_pd();
1276 fiz1 = _mm256_setzero_pd();
1277 fix2 = _mm256_setzero_pd();
1278 fiy2 = _mm256_setzero_pd();
1279 fiz2 = _mm256_setzero_pd();
1280 fix3 = _mm256_setzero_pd();
1281 fiy3 = _mm256_setzero_pd();
1282 fiz3 = _mm256_setzero_pd();
1284 /* Start inner kernel loop */
1285 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1288 /* Get j neighbor index, and coordinate index */
1289 jnrA = jjnr[jidx];
1290 jnrB = jjnr[jidx+1];
1291 jnrC = jjnr[jidx+2];
1292 jnrD = jjnr[jidx+3];
1293 j_coord_offsetA = DIM*jnrA;
1294 j_coord_offsetB = DIM*jnrB;
1295 j_coord_offsetC = DIM*jnrC;
1296 j_coord_offsetD = DIM*jnrD;
1298 /* load j atom coordinates */
1299 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1300 x+j_coord_offsetC,x+j_coord_offsetD,
1301 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1302 &jy2,&jz2,&jx3,&jy3,&jz3);
1304 /* Calculate displacement vector */
1305 dx00 = _mm256_sub_pd(ix0,jx0);
1306 dy00 = _mm256_sub_pd(iy0,jy0);
1307 dz00 = _mm256_sub_pd(iz0,jz0);
1308 dx11 = _mm256_sub_pd(ix1,jx1);
1309 dy11 = _mm256_sub_pd(iy1,jy1);
1310 dz11 = _mm256_sub_pd(iz1,jz1);
1311 dx12 = _mm256_sub_pd(ix1,jx2);
1312 dy12 = _mm256_sub_pd(iy1,jy2);
1313 dz12 = _mm256_sub_pd(iz1,jz2);
1314 dx13 = _mm256_sub_pd(ix1,jx3);
1315 dy13 = _mm256_sub_pd(iy1,jy3);
1316 dz13 = _mm256_sub_pd(iz1,jz3);
1317 dx21 = _mm256_sub_pd(ix2,jx1);
1318 dy21 = _mm256_sub_pd(iy2,jy1);
1319 dz21 = _mm256_sub_pd(iz2,jz1);
1320 dx22 = _mm256_sub_pd(ix2,jx2);
1321 dy22 = _mm256_sub_pd(iy2,jy2);
1322 dz22 = _mm256_sub_pd(iz2,jz2);
1323 dx23 = _mm256_sub_pd(ix2,jx3);
1324 dy23 = _mm256_sub_pd(iy2,jy3);
1325 dz23 = _mm256_sub_pd(iz2,jz3);
1326 dx31 = _mm256_sub_pd(ix3,jx1);
1327 dy31 = _mm256_sub_pd(iy3,jy1);
1328 dz31 = _mm256_sub_pd(iz3,jz1);
1329 dx32 = _mm256_sub_pd(ix3,jx2);
1330 dy32 = _mm256_sub_pd(iy3,jy2);
1331 dz32 = _mm256_sub_pd(iz3,jz2);
1332 dx33 = _mm256_sub_pd(ix3,jx3);
1333 dy33 = _mm256_sub_pd(iy3,jy3);
1334 dz33 = _mm256_sub_pd(iz3,jz3);
1336 /* Calculate squared distance and things based on it */
1337 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1338 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1339 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1340 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1341 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1342 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1343 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1344 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1345 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1346 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1348 rinv00 = avx256_invsqrt_d(rsq00);
1349 rinv11 = avx256_invsqrt_d(rsq11);
1350 rinv12 = avx256_invsqrt_d(rsq12);
1351 rinv13 = avx256_invsqrt_d(rsq13);
1352 rinv21 = avx256_invsqrt_d(rsq21);
1353 rinv22 = avx256_invsqrt_d(rsq22);
1354 rinv23 = avx256_invsqrt_d(rsq23);
1355 rinv31 = avx256_invsqrt_d(rsq31);
1356 rinv32 = avx256_invsqrt_d(rsq32);
1357 rinv33 = avx256_invsqrt_d(rsq33);
1359 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1360 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1361 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
1362 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1363 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1364 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
1365 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
1366 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
1367 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
1369 fjx0 = _mm256_setzero_pd();
1370 fjy0 = _mm256_setzero_pd();
1371 fjz0 = _mm256_setzero_pd();
1372 fjx1 = _mm256_setzero_pd();
1373 fjy1 = _mm256_setzero_pd();
1374 fjz1 = _mm256_setzero_pd();
1375 fjx2 = _mm256_setzero_pd();
1376 fjy2 = _mm256_setzero_pd();
1377 fjz2 = _mm256_setzero_pd();
1378 fjx3 = _mm256_setzero_pd();
1379 fjy3 = _mm256_setzero_pd();
1380 fjz3 = _mm256_setzero_pd();
1382 /**************************
1383 * CALCULATE INTERACTIONS *
1384 **************************/
1386 r00 = _mm256_mul_pd(rsq00,rinv00);
1388 /* Calculate table index by multiplying r with table scale and truncate to integer */
1389 rt = _mm256_mul_pd(r00,vftabscale);
1390 vfitab = _mm256_cvttpd_epi32(rt);
1391 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1392 vfitab = _mm_slli_epi32(vfitab,3);
1394 /* CUBIC SPLINE TABLE DISPERSION */
1395 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1396 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1397 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1398 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1399 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1400 Heps = _mm256_mul_pd(vfeps,H);
1401 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1402 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1403 fvdw6 = _mm256_mul_pd(c6_00,FF);
1405 /* CUBIC SPLINE TABLE REPULSION */
1406 vfitab = _mm_add_epi32(vfitab,ifour);
1407 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1408 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1409 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1410 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1411 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1412 Heps = _mm256_mul_pd(vfeps,H);
1413 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1414 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1415 fvdw12 = _mm256_mul_pd(c12_00,FF);
1416 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1418 fscal = fvdw;
1420 /* Calculate temporary vectorial force */
1421 tx = _mm256_mul_pd(fscal,dx00);
1422 ty = _mm256_mul_pd(fscal,dy00);
1423 tz = _mm256_mul_pd(fscal,dz00);
1425 /* Update vectorial force */
1426 fix0 = _mm256_add_pd(fix0,tx);
1427 fiy0 = _mm256_add_pd(fiy0,ty);
1428 fiz0 = _mm256_add_pd(fiz0,tz);
1430 fjx0 = _mm256_add_pd(fjx0,tx);
1431 fjy0 = _mm256_add_pd(fjy0,ty);
1432 fjz0 = _mm256_add_pd(fjz0,tz);
1434 /**************************
1435 * CALCULATE INTERACTIONS *
1436 **************************/
1438 /* REACTION-FIELD ELECTROSTATICS */
1439 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1441 fscal = felec;
1443 /* Calculate temporary vectorial force */
1444 tx = _mm256_mul_pd(fscal,dx11);
1445 ty = _mm256_mul_pd(fscal,dy11);
1446 tz = _mm256_mul_pd(fscal,dz11);
1448 /* Update vectorial force */
1449 fix1 = _mm256_add_pd(fix1,tx);
1450 fiy1 = _mm256_add_pd(fiy1,ty);
1451 fiz1 = _mm256_add_pd(fiz1,tz);
1453 fjx1 = _mm256_add_pd(fjx1,tx);
1454 fjy1 = _mm256_add_pd(fjy1,ty);
1455 fjz1 = _mm256_add_pd(fjz1,tz);
1457 /**************************
1458 * CALCULATE INTERACTIONS *
1459 **************************/
1461 /* REACTION-FIELD ELECTROSTATICS */
1462 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1464 fscal = felec;
1466 /* Calculate temporary vectorial force */
1467 tx = _mm256_mul_pd(fscal,dx12);
1468 ty = _mm256_mul_pd(fscal,dy12);
1469 tz = _mm256_mul_pd(fscal,dz12);
1471 /* Update vectorial force */
1472 fix1 = _mm256_add_pd(fix1,tx);
1473 fiy1 = _mm256_add_pd(fiy1,ty);
1474 fiz1 = _mm256_add_pd(fiz1,tz);
1476 fjx2 = _mm256_add_pd(fjx2,tx);
1477 fjy2 = _mm256_add_pd(fjy2,ty);
1478 fjz2 = _mm256_add_pd(fjz2,tz);
1480 /**************************
1481 * CALCULATE INTERACTIONS *
1482 **************************/
1484 /* REACTION-FIELD ELECTROSTATICS */
1485 felec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
1487 fscal = felec;
1489 /* Calculate temporary vectorial force */
1490 tx = _mm256_mul_pd(fscal,dx13);
1491 ty = _mm256_mul_pd(fscal,dy13);
1492 tz = _mm256_mul_pd(fscal,dz13);
1494 /* Update vectorial force */
1495 fix1 = _mm256_add_pd(fix1,tx);
1496 fiy1 = _mm256_add_pd(fiy1,ty);
1497 fiz1 = _mm256_add_pd(fiz1,tz);
1499 fjx3 = _mm256_add_pd(fjx3,tx);
1500 fjy3 = _mm256_add_pd(fjy3,ty);
1501 fjz3 = _mm256_add_pd(fjz3,tz);
1503 /**************************
1504 * CALCULATE INTERACTIONS *
1505 **************************/
1507 /* REACTION-FIELD ELECTROSTATICS */
1508 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1510 fscal = felec;
1512 /* Calculate temporary vectorial force */
1513 tx = _mm256_mul_pd(fscal,dx21);
1514 ty = _mm256_mul_pd(fscal,dy21);
1515 tz = _mm256_mul_pd(fscal,dz21);
1517 /* Update vectorial force */
1518 fix2 = _mm256_add_pd(fix2,tx);
1519 fiy2 = _mm256_add_pd(fiy2,ty);
1520 fiz2 = _mm256_add_pd(fiz2,tz);
1522 fjx1 = _mm256_add_pd(fjx1,tx);
1523 fjy1 = _mm256_add_pd(fjy1,ty);
1524 fjz1 = _mm256_add_pd(fjz1,tz);
1526 /**************************
1527 * CALCULATE INTERACTIONS *
1528 **************************/
1530 /* REACTION-FIELD ELECTROSTATICS */
1531 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1533 fscal = felec;
1535 /* Calculate temporary vectorial force */
1536 tx = _mm256_mul_pd(fscal,dx22);
1537 ty = _mm256_mul_pd(fscal,dy22);
1538 tz = _mm256_mul_pd(fscal,dz22);
1540 /* Update vectorial force */
1541 fix2 = _mm256_add_pd(fix2,tx);
1542 fiy2 = _mm256_add_pd(fiy2,ty);
1543 fiz2 = _mm256_add_pd(fiz2,tz);
1545 fjx2 = _mm256_add_pd(fjx2,tx);
1546 fjy2 = _mm256_add_pd(fjy2,ty);
1547 fjz2 = _mm256_add_pd(fjz2,tz);
1549 /**************************
1550 * CALCULATE INTERACTIONS *
1551 **************************/
1553 /* REACTION-FIELD ELECTROSTATICS */
1554 felec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
1556 fscal = felec;
1558 /* Calculate temporary vectorial force */
1559 tx = _mm256_mul_pd(fscal,dx23);
1560 ty = _mm256_mul_pd(fscal,dy23);
1561 tz = _mm256_mul_pd(fscal,dz23);
1563 /* Update vectorial force */
1564 fix2 = _mm256_add_pd(fix2,tx);
1565 fiy2 = _mm256_add_pd(fiy2,ty);
1566 fiz2 = _mm256_add_pd(fiz2,tz);
1568 fjx3 = _mm256_add_pd(fjx3,tx);
1569 fjy3 = _mm256_add_pd(fjy3,ty);
1570 fjz3 = _mm256_add_pd(fjz3,tz);
1572 /**************************
1573 * CALCULATE INTERACTIONS *
1574 **************************/
1576 /* REACTION-FIELD ELECTROSTATICS */
1577 felec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
1579 fscal = felec;
1581 /* Calculate temporary vectorial force */
1582 tx = _mm256_mul_pd(fscal,dx31);
1583 ty = _mm256_mul_pd(fscal,dy31);
1584 tz = _mm256_mul_pd(fscal,dz31);
1586 /* Update vectorial force */
1587 fix3 = _mm256_add_pd(fix3,tx);
1588 fiy3 = _mm256_add_pd(fiy3,ty);
1589 fiz3 = _mm256_add_pd(fiz3,tz);
1591 fjx1 = _mm256_add_pd(fjx1,tx);
1592 fjy1 = _mm256_add_pd(fjy1,ty);
1593 fjz1 = _mm256_add_pd(fjz1,tz);
1595 /**************************
1596 * CALCULATE INTERACTIONS *
1597 **************************/
1599 /* REACTION-FIELD ELECTROSTATICS */
1600 felec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
1602 fscal = felec;
1604 /* Calculate temporary vectorial force */
1605 tx = _mm256_mul_pd(fscal,dx32);
1606 ty = _mm256_mul_pd(fscal,dy32);
1607 tz = _mm256_mul_pd(fscal,dz32);
1609 /* Update vectorial force */
1610 fix3 = _mm256_add_pd(fix3,tx);
1611 fiy3 = _mm256_add_pd(fiy3,ty);
1612 fiz3 = _mm256_add_pd(fiz3,tz);
1614 fjx2 = _mm256_add_pd(fjx2,tx);
1615 fjy2 = _mm256_add_pd(fjy2,ty);
1616 fjz2 = _mm256_add_pd(fjz2,tz);
1618 /**************************
1619 * CALCULATE INTERACTIONS *
1620 **************************/
1622 /* REACTION-FIELD ELECTROSTATICS */
1623 felec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
1625 fscal = felec;
1627 /* Calculate temporary vectorial force */
1628 tx = _mm256_mul_pd(fscal,dx33);
1629 ty = _mm256_mul_pd(fscal,dy33);
1630 tz = _mm256_mul_pd(fscal,dz33);
1632 /* Update vectorial force */
1633 fix3 = _mm256_add_pd(fix3,tx);
1634 fiy3 = _mm256_add_pd(fiy3,ty);
1635 fiz3 = _mm256_add_pd(fiz3,tz);
1637 fjx3 = _mm256_add_pd(fjx3,tx);
1638 fjy3 = _mm256_add_pd(fjy3,ty);
1639 fjz3 = _mm256_add_pd(fjz3,tz);
1641 fjptrA = f+j_coord_offsetA;
1642 fjptrB = f+j_coord_offsetB;
1643 fjptrC = f+j_coord_offsetC;
1644 fjptrD = f+j_coord_offsetD;
1646 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1647 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1648 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
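/* The decrement call transposes the accumulated j-force registers back to
 * per-atom layout and subtracts them from f[], since the equal and opposite
 * contribution was added to the i-atom accumulators above.
 */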
1650 /* Inner loop uses 294 flops */
}
1653 if(jidx<j_index_end)
{
1656 /* Get j neighbor index, and coordinate index */
1657 jnrlistA = jjnr[jidx];
1658 jnrlistB = jjnr[jidx+1];
1659 jnrlistC = jjnr[jidx+2];
1660 jnrlistD = jjnr[jidx+3];
1661 /* Sign of each element will be negative for non-real atoms.
1662 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1663 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
1665 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1667 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1668 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1669 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
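/* The two permutes duplicate each 32-bit comparison result pairwise, so after
 * combining the halves and casting to __m256d, every 64-bit lane holds an
 * all-ones mask for dummy entries and zero for real atoms.
 */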
1671 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1672 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1673 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1674 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1675 j_coord_offsetA = DIM*jnrA;
1676 j_coord_offsetB = DIM*jnrB;
1677 j_coord_offsetC = DIM*jnrC;
1678 j_coord_offsetD = DIM*jnrD;
1680 /* load j atom coordinates */
1681 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1682 x+j_coord_offsetC,x+j_coord_offsetD,
1683 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1684 &jy2,&jz2,&jx3,&jy3,&jz3);
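/* The swizzled load gathers x/y/z for all four atoms of the four j waters and
 * transposes them so that each SIMD register holds one coordinate of one atom
 * across the four j molecules.
 */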
1686 /* Calculate displacement vector */
1687 dx00 = _mm256_sub_pd(ix0,jx0);
1688 dy00 = _mm256_sub_pd(iy0,jy0);
1689 dz00 = _mm256_sub_pd(iz0,jz0);
1690 dx11 = _mm256_sub_pd(ix1,jx1);
1691 dy11 = _mm256_sub_pd(iy1,jy1);
1692 dz11 = _mm256_sub_pd(iz1,jz1);
1693 dx12 = _mm256_sub_pd(ix1,jx2);
1694 dy12 = _mm256_sub_pd(iy1,jy2);
1695 dz12 = _mm256_sub_pd(iz1,jz2);
1696 dx13 = _mm256_sub_pd(ix1,jx3);
1697 dy13 = _mm256_sub_pd(iy1,jy3);
1698 dz13 = _mm256_sub_pd(iz1,jz3);
1699 dx21 = _mm256_sub_pd(ix2,jx1);
1700 dy21 = _mm256_sub_pd(iy2,jy1);
1701 dz21 = _mm256_sub_pd(iz2,jz1);
1702 dx22 = _mm256_sub_pd(ix2,jx2);
1703 dy22 = _mm256_sub_pd(iy2,jy2);
1704 dz22 = _mm256_sub_pd(iz2,jz2);
1705 dx23 = _mm256_sub_pd(ix2,jx3);
1706 dy23 = _mm256_sub_pd(iy2,jy3);
1707 dz23 = _mm256_sub_pd(iz2,jz3);
1708 dx31 = _mm256_sub_pd(ix3,jx1);
1709 dy31 = _mm256_sub_pd(iy3,jy1);
1710 dz31 = _mm256_sub_pd(iz3,jz1);
1711 dx32 = _mm256_sub_pd(ix3,jx2);
1712 dy32 = _mm256_sub_pd(iy3,jy2);
1713 dz32 = _mm256_sub_pd(iz3,jz2);
1714 dx33 = _mm256_sub_pd(ix3,jx3);
1715 dy33 = _mm256_sub_pd(iy3,jy3);
1716 dz33 = _mm256_sub_pd(iz3,jz3);
1718 /* Calculate squared distance and things based on it */
1719 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1720 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1721 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1722 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1723 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1724 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1725 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1726 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1727 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1728 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1730 rinv00 = avx256_invsqrt_d(rsq00);
1731 rinv11 = avx256_invsqrt_d(rsq11);
1732 rinv12 = avx256_invsqrt_d(rsq12);
1733 rinv13 = avx256_invsqrt_d(rsq13);
1734 rinv21 = avx256_invsqrt_d(rsq21);
1735 rinv22 = avx256_invsqrt_d(rsq22);
1736 rinv23 = avx256_invsqrt_d(rsq23);
1737 rinv31 = avx256_invsqrt_d(rsq31);
1738 rinv32 = avx256_invsqrt_d(rsq32);
1739 rinv33 = avx256_invsqrt_d(rsq33);
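/* avx256_invsqrt_d returns 1/sqrt(rsq); in these kernel utilities it is
 * typically a single-precision rsqrt estimate refined with Newton-Raphson
 * iterations to double accuracy, rather than a full sqrt and division.
 */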
1741 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1742 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1743 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
1744 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1745 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1746 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
1747 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
1748 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
1749 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
1751 fjx0 = _mm256_setzero_pd();
1752 fjy0 = _mm256_setzero_pd();
1753 fjz0 = _mm256_setzero_pd();
1754 fjx1 = _mm256_setzero_pd();
1755 fjy1 = _mm256_setzero_pd();
1756 fjz1 = _mm256_setzero_pd();
1757 fjx2 = _mm256_setzero_pd();
1758 fjy2 = _mm256_setzero_pd();
1759 fjz2 = _mm256_setzero_pd();
1760 fjx3 = _mm256_setzero_pd();
1761 fjy3 = _mm256_setzero_pd();
1762 fjz3 = _mm256_setzero_pd();
1764 /**************************
1765 * CALCULATE INTERACTIONS *
1766 **************************/
1768 r00 = _mm256_mul_pd(rsq00,rinv00);
1769 r00 = _mm256_andnot_pd(dummy_mask,r00);
1771 /* Calculate table index by multiplying r with table scale and truncate to integer */
1772 rt = _mm256_mul_pd(r00,vftabscale);
1773 vfitab = _mm256_cvttpd_epi32(rt);
1774 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1775 vfitab = _mm_slli_epi32(vfitab,3);
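/* Each table point stores eight doubles: Y,F,G,H for dispersion followed by
 * Y,F,G,H for repulsion, hence the shift by 3 (index*8). vfeps is the
 * fractional position within the interval; r00 was cleared for dummy lanes
 * above so their table indices stay within the table.
 */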
1777 /* CUBIC SPLINE TABLE DISPERSION */
1778 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1779 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1780 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1781 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1782 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1783 Heps = _mm256_mul_pd(vfeps,H);
1784 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1785 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1786 fvdw6 = _mm256_mul_pd(c6_00,FF);
1788 /* CUBIC SPLINE TABLE REPULSION */
1789 vfitab = _mm_add_epi32(vfitab,ifour);
1790 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1791 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1792 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1793 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1794 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1795 Heps = _mm256_mul_pd(vfeps,H);
1796 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1797 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1798 fvdw12 = _mm256_mul_pd(c12_00,FF);
1799 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
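/* FF = F + eps*(2*G + 3*eps*H) is dV/d(eps) of the cubic spline; multiplying
 * by vftabscale converts it to dV/dr, multiplying by rinv00 gives (dV/dr)/r,
 * and the sign flip via signbit yields fscal = -(dV/dr)/r for the combined
 * dispersion+repulsion term.
 */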
1801 fscal = fvdw;
1803 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1805 /* Calculate temporary vectorial force */
1806 tx = _mm256_mul_pd(fscal,dx00);
1807 ty = _mm256_mul_pd(fscal,dy00);
1808 tz = _mm256_mul_pd(fscal,dz00);
1810 /* Update vectorial force */
1811 fix0 = _mm256_add_pd(fix0,tx);
1812 fiy0 = _mm256_add_pd(fiy0,ty);
1813 fiz0 = _mm256_add_pd(fiz0,tz);
1815 fjx0 = _mm256_add_pd(fjx0,tx);
1816 fjy0 = _mm256_add_pd(fjy0,ty);
1817 fjz0 = _mm256_add_pd(fjz0,tz);
1819 /**************************
1820 * CALCULATE INTERACTIONS *
1821 **************************/
1823 /* REACTION-FIELD ELECTROSTATICS */
1824 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1826 fscal = felec;
1828 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1830 /* Calculate temporary vectorial force */
1831 tx = _mm256_mul_pd(fscal,dx11);
1832 ty = _mm256_mul_pd(fscal,dy11);
1833 tz = _mm256_mul_pd(fscal,dz11);
1835 /* Update vectorial force */
1836 fix1 = _mm256_add_pd(fix1,tx);
1837 fiy1 = _mm256_add_pd(fiy1,ty);
1838 fiz1 = _mm256_add_pd(fiz1,tz);
1840 fjx1 = _mm256_add_pd(fjx1,tx);
1841 fjy1 = _mm256_add_pd(fjy1,ty);
1842 fjz1 = _mm256_add_pd(fjz1,tz);
1844 /**************************
1845 * CALCULATE INTERACTIONS *
1846 **************************/
1848 /* REACTION-FIELD ELECTROSTATICS */
1849 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1851 fscal = felec;
1853 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1855 /* Calculate temporary vectorial force */
1856 tx = _mm256_mul_pd(fscal,dx12);
1857 ty = _mm256_mul_pd(fscal,dy12);
1858 tz = _mm256_mul_pd(fscal,dz12);
1860 /* Update vectorial force */
1861 fix1 = _mm256_add_pd(fix1,tx);
1862 fiy1 = _mm256_add_pd(fiy1,ty);
1863 fiz1 = _mm256_add_pd(fiz1,tz);
1865 fjx2 = _mm256_add_pd(fjx2,tx);
1866 fjy2 = _mm256_add_pd(fjy2,ty);
1867 fjz2 = _mm256_add_pd(fjz2,tz);
1869 /**************************
1870 * CALCULATE INTERACTIONS *
1871 **************************/
1873 /* REACTION-FIELD ELECTROSTATICS */
1874 felec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_mul_pd(rinv13,rinvsq13),krf2));
1876 fscal = felec;
1878 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1880 /* Calculate temporary vectorial force */
1881 tx = _mm256_mul_pd(fscal,dx13);
1882 ty = _mm256_mul_pd(fscal,dy13);
1883 tz = _mm256_mul_pd(fscal,dz13);
1885 /* Update vectorial force */
1886 fix1 = _mm256_add_pd(fix1,tx);
1887 fiy1 = _mm256_add_pd(fiy1,ty);
1888 fiz1 = _mm256_add_pd(fiz1,tz);
1890 fjx3 = _mm256_add_pd(fjx3,tx);
1891 fjy3 = _mm256_add_pd(fjy3,ty);
1892 fjz3 = _mm256_add_pd(fjz3,tz);
1894 /**************************
1895 * CALCULATE INTERACTIONS *
1896 **************************/
1898 /* REACTION-FIELD ELECTROSTATICS */
1899 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1901 fscal = felec;
1903 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1905 /* Calculate temporary vectorial force */
1906 tx = _mm256_mul_pd(fscal,dx21);
1907 ty = _mm256_mul_pd(fscal,dy21);
1908 tz = _mm256_mul_pd(fscal,dz21);
1910 /* Update vectorial force */
1911 fix2 = _mm256_add_pd(fix2,tx);
1912 fiy2 = _mm256_add_pd(fiy2,ty);
1913 fiz2 = _mm256_add_pd(fiz2,tz);
1915 fjx1 = _mm256_add_pd(fjx1,tx);
1916 fjy1 = _mm256_add_pd(fjy1,ty);
1917 fjz1 = _mm256_add_pd(fjz1,tz);
1919 /**************************
1920 * CALCULATE INTERACTIONS *
1921 **************************/
1923 /* REACTION-FIELD ELECTROSTATICS */
1924 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1926 fscal = felec;
1928 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1930 /* Calculate temporary vectorial force */
1931 tx = _mm256_mul_pd(fscal,dx22);
1932 ty = _mm256_mul_pd(fscal,dy22);
1933 tz = _mm256_mul_pd(fscal,dz22);
1935 /* Update vectorial force */
1936 fix2 = _mm256_add_pd(fix2,tx);
1937 fiy2 = _mm256_add_pd(fiy2,ty);
1938 fiz2 = _mm256_add_pd(fiz2,tz);
1940 fjx2 = _mm256_add_pd(fjx2,tx);
1941 fjy2 = _mm256_add_pd(fjy2,ty);
1942 fjz2 = _mm256_add_pd(fjz2,tz);
1944 /**************************
1945 * CALCULATE INTERACTIONS *
1946 **************************/
1948 /* REACTION-FIELD ELECTROSTATICS */
1949 felec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_mul_pd(rinv23,rinvsq23),krf2));
1951 fscal = felec;
1953 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1955 /* Calculate temporary vectorial force */
1956 tx = _mm256_mul_pd(fscal,dx23);
1957 ty = _mm256_mul_pd(fscal,dy23);
1958 tz = _mm256_mul_pd(fscal,dz23);
1960 /* Update vectorial force */
1961 fix2 = _mm256_add_pd(fix2,tx);
1962 fiy2 = _mm256_add_pd(fiy2,ty);
1963 fiz2 = _mm256_add_pd(fiz2,tz);
1965 fjx3 = _mm256_add_pd(fjx3,tx);
1966 fjy3 = _mm256_add_pd(fjy3,ty);
1967 fjz3 = _mm256_add_pd(fjz3,tz);
1969 /**************************
1970 * CALCULATE INTERACTIONS *
1971 **************************/
1973 /* REACTION-FIELD ELECTROSTATICS */
1974 felec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_mul_pd(rinv31,rinvsq31),krf2));
1976 fscal = felec;
1978 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1980 /* Calculate temporary vectorial force */
1981 tx = _mm256_mul_pd(fscal,dx31);
1982 ty = _mm256_mul_pd(fscal,dy31);
1983 tz = _mm256_mul_pd(fscal,dz31);
1985 /* Update vectorial force */
1986 fix3 = _mm256_add_pd(fix3,tx);
1987 fiy3 = _mm256_add_pd(fiy3,ty);
1988 fiz3 = _mm256_add_pd(fiz3,tz);
1990 fjx1 = _mm256_add_pd(fjx1,tx);
1991 fjy1 = _mm256_add_pd(fjy1,ty);
1992 fjz1 = _mm256_add_pd(fjz1,tz);
1994 /**************************
1995 * CALCULATE INTERACTIONS *
1996 **************************/
1998 /* REACTION-FIELD ELECTROSTATICS */
1999 felec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_mul_pd(rinv32,rinvsq32),krf2));
2001 fscal = felec;
2003 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2005 /* Calculate temporary vectorial force */
2006 tx = _mm256_mul_pd(fscal,dx32);
2007 ty = _mm256_mul_pd(fscal,dy32);
2008 tz = _mm256_mul_pd(fscal,dz32);
2010 /* Update vectorial force */
2011 fix3 = _mm256_add_pd(fix3,tx);
2012 fiy3 = _mm256_add_pd(fiy3,ty);
2013 fiz3 = _mm256_add_pd(fiz3,tz);
2015 fjx2 = _mm256_add_pd(fjx2,tx);
2016 fjy2 = _mm256_add_pd(fjy2,ty);
2017 fjz2 = _mm256_add_pd(fjz2,tz);
2019 /**************************
2020 * CALCULATE INTERACTIONS *
2021 **************************/
2023 /* REACTION-FIELD ELECTROSTATICS */
2024 felec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_mul_pd(rinv33,rinvsq33),krf2));
2026 fscal = felec;
2028 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2030 /* Calculate temporary vectorial force */
2031 tx = _mm256_mul_pd(fscal,dx33);
2032 ty = _mm256_mul_pd(fscal,dy33);
2033 tz = _mm256_mul_pd(fscal,dz33);
2035 /* Update vectorial force */
2036 fix3 = _mm256_add_pd(fix3,tx);
2037 fiy3 = _mm256_add_pd(fiy3,ty);
2038 fiz3 = _mm256_add_pd(fiz3,tz);
2040 fjx3 = _mm256_add_pd(fjx3,tx);
2041 fjy3 = _mm256_add_pd(fjy3,ty);
2042 fjz3 = _mm256_add_pd(fjz3,tz);
2044 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2045 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2046 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2047 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
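/* Dummy j entries are redirected to the local scratch buffer, so the force
 * decrement below simply discards their masked-out contributions.
 */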
2049 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2050 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2051 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2053 /* Inner loop uses 295 flops */
}
2056 /* End of innermost loop */
2058 gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2059 f+i_coord_offset,fshift+i_shift_offset);
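/* Reduces the four SIMD lanes of each i-atom force accumulator and adds the
 * result to the force array and to the shift-force array used for the virial.
 */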
2061 /* Increment number of inner iterations */
2062 inneriter += j_index_end - j_index_start;
2064 /* Outer loop uses 24 flops */
}
2067 /* Increment number of outer iterations */
2068 outeriter += nri;
2070 /* Update outer/inner flops */
2072 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*295);
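/* The call above registers this kernel's outer/inner flop estimate with the
 * nonbonded accounting (nrnb), which feeds mdrun's performance summary.
 */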