;; Machine description for ARM processor synchronization primitives.
;; Copyright (C) 2010-2017 Free Software Foundation, Inc.
;; Written by Marcus Shawcroft (marcus.shawcroft@arm.com)
;; 64bit Atomics by Dave Gilbert (david.gilbert@linaro.org)
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;; General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_mode_attr sync_predtab
  [(QI "TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER")
   (HI "TARGET_HAVE_LDREXBH && TARGET_HAVE_MEMORY_BARRIER")
   (SI "TARGET_HAVE_LDREX && TARGET_HAVE_MEMORY_BARRIER")
   (DI "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN
	&& TARGET_HAVE_MEMORY_BARRIER")])

(define_code_iterator syncop [plus minus ior xor and])

(define_code_attr sync_optab
  [(ior "or") (xor "xor") (and "and") (plus "add") (minus "sub")])

(define_mode_attr sync_sfx
  [(QI "b") (HI "h") (SI "") (DI "d")])

(define_expand "memory_barrier"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
  "TARGET_HAVE_MEMORY_BARRIER"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

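;; Usage sketch (illustrative comment only, not part of the machine
;; description; the helper name is hypothetical): a full C11 fence reaches
;; this expander and becomes a DMB on cores that provide one.
;;
;;   void
;;   full_fence (void)
;;   {
;;     /* Expands through "memory_barrier"; emits a data memory barrier when
;;        TARGET_HAVE_DMB, or the CP15 MCR form on older cores.  */
;;     __atomic_thread_fence (__ATOMIC_SEQ_CST);
;;   }
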
(define_insn "*memory_barrier"
  [(set (match_operand:BLK 0 "" "")
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))]
  "TARGET_HAVE_MEMORY_BARRIER"
  {
    if (TARGET_HAVE_DMB)
      return "dmb\\tish";

    if (TARGET_HAVE_DMB_MCR)
      return "mcr\\tp15, 0, r0, c7, c10, 5";

    gcc_unreachable ();
  }
  [(set_attr "length" "4")
   (set_attr "conds" "unconditional")
   (set_attr "predicable" "no")])

(define_insn "atomic_load<mode>"
  [(set (match_operand:QHSI 0 "register_operand" "=r,r,l")
    (unspec_volatile:QHSI
      [(match_operand:QHSI 1 "arm_sync_memory_operand" "Q,Q,Q")
       (match_operand:SI 2 "const_int_operand" "n,Pf,n")]   ;; model
      VUNSPEC_LDA))]
  "TARGET_HAVE_LDACQ"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
      {
	if (TARGET_THUMB1)
	  return \"ldr<sync_sfx>\\t%0, %1\";
	else
	  return \"ldr<sync_sfx>%?\\t%0, %1\";
      }
    else
      {
	if (TARGET_THUMB1)
	  return \"lda<sync_sfx>\\t%0, %1\";
	else
	  return \"lda<sync_sfx>%?\\t%0, %1\";
      }
  }
  [(set_attr "arch" "32,v8mb,any")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

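;; Usage sketch (illustrative comment only; helper names are hypothetical):
;; on targets with load-acquire instructions, a relaxed load keeps the plain
;; LDR form above while acquire or stronger ordering selects LDA.
;;
;;   int
;;   load_acquire (int *p)
;;   {
;;     return __atomic_load_n (p, __ATOMIC_ACQUIRE);   /* lda */
;;   }
;;
;;   int
;;   load_relaxed (int *p)
;;   {
;;     return __atomic_load_n (p, __ATOMIC_RELAXED);   /* ldr */
;;   }
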
(define_insn "atomic_store<mode>"
  [(set (match_operand:QHSI 0 "memory_operand" "=Q,Q,Q")
    (unspec_volatile:QHSI
      [(match_operand:QHSI 1 "general_operand" "r,r,l")
       (match_operand:SI 2 "const_int_operand" "n,Pf,n")]   ;; model
      VUNSPEC_STL))]
  "TARGET_HAVE_LDACQ"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
    if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
      {
	if (TARGET_THUMB1)
	  return \"str<sync_sfx>\t%1, %0\";
	else
	  return \"str<sync_sfx>%?\t%1, %0\";
      }
    else
      {
	if (TARGET_THUMB1)
	  return \"stl<sync_sfx>\t%1, %0\";
	else
	  return \"stl<sync_sfx>%?\t%1, %0\";
      }
  }
  [(set_attr "arch" "32,v8mb,any")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

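;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; release or seq_cst ordering selects the STL form above, while anything
;; weaker keeps a plain STR.
;;
;;   void
;;   store_release (int *p, int v)
;;   {
;;     __atomic_store_n (p, v, __ATOMIC_RELEASE);   /* stl */
;;   }
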
;; An LDRD instruction usable by the atomic_loaddi expander on LPAE targets

(define_insn "arm_atomic_loaddi2_ldrd"
  [(set (match_operand:DI 0 "register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "arm_sync_memory_operand" "Q")]
	  VUNSPEC_LDRD_ATOMIC))]
  "ARM_DOUBLEWORD_ALIGN && TARGET_HAVE_LPAE"
  "ldrd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

;; There are three ways to expand this depending on the architecture
;; features available.  As for the barriers, a load needs a barrier
;; after it on all non-relaxed memory models except when the load
;; has acquire semantics (for ARMv8-A).

(define_expand "atomic_loaddi"
  [(match_operand:DI 0 "s_register_operand")   ;; val out
   (match_operand:DI 1 "mem_noofs_operand")    ;; memory
   (match_operand:SI 2 "const_int_operand")]   ;; model
  "(TARGET_HAVE_LDREXD || TARGET_HAVE_LPAE || TARGET_HAVE_LDACQEXD)
   && ARM_DOUBLEWORD_ALIGN"
{
  memmodel model = memmodel_from_int (INTVAL (operands[2]));

  /* For ARMv8-A we can use an LDAEXD to atomically load two 32-bit registers
     when acquire or stronger semantics are needed.  When the relaxed model is
     used this can be relaxed to a normal LDRD.  */
  if (TARGET_HAVE_LDACQEXD)
    {
      if (is_mm_relaxed (model))
	emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
      else
	emit_insn (gen_arm_load_acquire_exclusivedi (operands[0], operands[1]));

      DONE;
    }

  /* On LPAE targets LDRD and STRD accesses to 64-bit aligned locations are
     64-bit single-copy atomic.  We still need barriers in the appropriate
     places to implement the ordering constraints.  */
  if (TARGET_HAVE_LPAE)
    emit_insn (gen_arm_atomic_loaddi2_ldrd (operands[0], operands[1]));
  else
    emit_insn (gen_arm_load_exclusivedi (operands[0], operands[1]));

  /* All non-relaxed models need a barrier after the load when load-acquire
     instructions are not available.  */
  if (!is_mm_relaxed (model))
    expand_mem_thread_fence (model);

  DONE;
})

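;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; on targets that satisfy the condition above, a doubleword atomic load such
;; as the one below goes through "atomic_loaddi" and becomes LDAEXD, LDRD or
;; LDREXD, plus a trailing barrier where the memory model requires one.
;;
;;   unsigned long long
;;   load64_acquire (unsigned long long *p)
;;   {
;;     return __atomic_load_n (p, __ATOMIC_ACQUIRE);
;;   }
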
(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "s_register_operand" "")    ;; bool out
   (match_operand:QHSD 1 "s_register_operand" "")  ;; val out
   (match_operand:QHSD 2 "mem_noofs_operand" "")   ;; memory
   (match_operand:QHSD 3 "general_operand" "")     ;; expected
   (match_operand:QHSD 4 "s_register_operand" "")  ;; desired
   (match_operand:SI 5 "const_int_operand")        ;; is_weak
   (match_operand:SI 6 "const_int_operand")        ;; mod_s
   (match_operand:SI 7 "const_int_operand")]       ;; mod_f
  "<sync_predtab>"
{
  arm_expand_compare_and_swap (operands);
  DONE;
})

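;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; the builtin below maps onto this expander, with operands 5-7 carrying the
;; weak/strong flag and the success and failure memory models.
;;
;;   int
;;   cas_int (int *p, int expected, int desired)
;;   {
;;     return __atomic_compare_exchange_n (p, &expected, desired,
;;                                         /* weak */ 0,
;;                                         __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
;;   }
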
;; Constraints of this pattern must be at least as strict as those of the
;; cbranchsi operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_compare_and_swap<CCSI:arch><NARROW:mode>_1"
  [(set (match_operand:CCSI 0 "cc_register_operand" "=&c,&l,&l,&l")      ;; bool out
	(unspec_volatile:CCSI [(const_int 0)] VUNSPEC_ATOMIC_CAS))
   (set (match_operand:SI 1 "s_register_operand" "=&r,&l,&0,&l*h")       ;; val out
	(zero_extend:SI
	  (match_operand:NARROW 2 "mem_noofs_operand" "+Ua,Ua,Ua,Ua")))  ;; memory
   (set (match_dup 2)
	(unspec_volatile:NARROW
	  [(match_operand:SI 3 "arm_add_operand" "rIL,lIL*h,J,*r")       ;; expected
	   (match_operand:NARROW 4 "s_register_operand" "r,r,r,r")       ;; desired
	   (match_operand:SI 5 "const_int_operand")    ;; is_weak
	   (match_operand:SI 6 "const_int_operand")    ;; mod_s
	   (match_operand:SI 7 "const_int_operand")]   ;; mod_f
	  VUNSPEC_ATOMIC_CAS))
   (clobber (match_scratch:SI 8 "=&r,X,X,X"))]
  "<NARROW:sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_compare_and_swap (operands);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])

(define_mode_attr cas_cmp_operand
  [(SI "arm_add_operand") (DI "cmpdi_operand")])
(define_mode_attr cas_cmp_str
  [(SI "rIL") (DI "rDi")])

;; Constraints of this pattern must be at least as strict as those of the
;; cbranchsi operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_compare_and_swap<CCSI:arch><SIDI:mode>_1"
  [(set (match_operand:CCSI 0 "cc_register_operand" "=&c,&l,&l,&l")     ;; bool out
	(unspec_volatile:CCSI [(const_int 0)] VUNSPEC_ATOMIC_CAS))
   (set (match_operand:SIDI 1 "s_register_operand" "=&r,&l,&0,&l*h")    ;; val out
	(match_operand:SIDI 2 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))      ;; memory
   (set (match_dup 2)
	(unspec_volatile:SIDI
	  [(match_operand:SIDI 3 "<cas_cmp_operand>" "<cas_cmp_str>,lIL*h,J,*r") ;; expected
	   (match_operand:SIDI 4 "s_register_operand" "r,r,r,r")        ;; desired
	   (match_operand:SI 5 "const_int_operand")    ;; is_weak
	   (match_operand:SI 6 "const_int_operand")    ;; mod_s
	   (match_operand:SI 7 "const_int_operand")]   ;; mod_f
	  VUNSPEC_ATOMIC_CAS))
   (clobber (match_scratch:SI 8 "=&r,X,X,X"))]
  "<SIDI:sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_compare_and_swap (operands);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])

(define_insn_and_split "atomic_exchange<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")   ;; output
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))   ;; memory
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_operand:QHSD 2 "s_register_operand" "r,r")   ;; input
	   (match_operand:SI 3 "const_int_operand" "")]        ;; model
	  VUNSPEC_ATOMIC_XCHG))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (SET, operands[0], NULL, operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

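;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; an atomic swap maps onto this pattern and is split into an LDREX/STREX
;; retry loop after reload.
;;
;;   int
;;   swap_int (int *p, int v)
;;   {
;;     return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
;;   }
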
;; The following mode and code attributes are defined here because they are
;; specific to atomics and are not needed anywhere else.

(define_mode_attr atomic_op_operand
  [(QI "reg_or_int_operand")
   (HI "reg_or_int_operand")
   (SI "reg_or_int_operand")
   (DI "s_register_operand")])

(define_mode_attr atomic_op_str
  [(QI "rn") (HI "rn") (SI "rn") (DI "r")])

(define_code_attr thumb1_atomic_op_str
  [(ior "l,l") (xor "l,l") (and "l,l") (plus "lIJL,r") (minus "lPd,lPd")])

(define_code_attr thumb1_atomic_newop_str
  [(ior "&l,&l") (xor "&l,&l") (and "&l,&l") (plus "&l,&r") (minus "&l,&l")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_<sync_optab><mode>"
  [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua,Ua")
	(unspec_volatile:QHSD
	  [(syncop:QHSD (match_dup 0)
	     (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>"))
	   (match_operand:SI 2 "const_int_operand")]   ;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 3 "=&r,<thumb1_atomic_newop_str>"))
   (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, NULL, operands[3], operands[0],
			 operands[1], operands[2], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])

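;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; when the result of the builtin is unused, as below, the expansion can use
;; this plain "atomic_<op>" pattern rather than one of the fetch variants.
;;
;;   void
;;   add_relaxed (int *counter)
;;   {
;;     (void) __atomic_fetch_add (counter, 1, __ATOMIC_RELAXED);
;;   }
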
;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_nand<mode>"
  [(set (match_operand:QHSD 0 "mem_noofs_operand" "+Ua,Ua")
	(unspec_volatile:QHSD
	  [(not:QHSD
	     (and:QHSD (match_dup 0)
	       (match_operand:QHSD 1 "<atomic_op_operand>" "<atomic_op_str>,l")))
	   (match_operand:SI 2 "const_int_operand")]   ;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 3 "=&r,&l"))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, NULL, operands[3], operands[0],
			 operands[1], operands[2], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

;; Three alternatives are needed to represent constraints after split from
;; thumb1_addsi3: (i) the case where operand 1 and the destination can be in
;; different registers, (ii) the case where they are in the same low register
;; and (iii) the case where they are in the same register without restriction
;; on the register.  We slightly disparage the alternatives that require
;; copying the old value into the register for the new value (see bind_old_new
;; in arm_split_atomic_op).
(define_code_attr thumb1_atomic_fetch_op_str
  [(ior "l,l,l") (xor "l,l,l") (and "l,l,l") (plus "lL,?IJ,?r") (minus "lPd,lPd,lPd")])

(define_code_attr thumb1_atomic_fetch_newop_str
  [(ior "&l,&l,&l") (xor "&l,&l,&l") (and "&l,&l,&l") (plus "&l,&l,&r") (minus "&l,&l,&l")])

(define_code_attr thumb1_atomic_fetch_oldop_str
  [(ior "&r,&r,&r") (xor "&r,&r,&r") (and "&r,&r,&r") (plus "&l,&r,&r") (minus "&l,&l,&l")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_fetch_<sync_optab><mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_fetch_oldop_str>")
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua,Ua"))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(syncop:QHSD (match_dup 1)
	     (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_fetch_op_str>"))
	   (match_operand:SI 3 "const_int_operand")]   ;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 4 "=&r,<thumb1_atomic_fetch_newop_str>"))
   (clobber (match_scratch:SI 5 "=&r,&l,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, operands[0], operands[4], operands[1],
			 operands[2], operands[3], operands[5]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb,v8mb")])

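;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; here the old value is needed, so the builtin maps onto this
;; "atomic_fetch_<op>" pattern.
;;
;;   int
;;   fetch_then_or (int *flags, int mask)
;;   {
;;     return __atomic_fetch_or (flags, mask, __ATOMIC_ACQ_REL);
;;   }
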
;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_fetch_nand<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&r")
	(match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua"))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(not:QHSD
	     (and:QHSD (match_dup 1)
	       (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l")))
	   (match_operand:SI 3 "const_int_operand")]   ;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:QHSD 4 "=&r,&l"))
   (clobber (match_scratch:SI 5 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, operands[0], operands[4], operands[1],
			 operands[2], operands[3], operands[5]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic operations in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_<sync_optab>_fetch<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,<thumb1_atomic_newop_str>")
	(syncop:QHSD
	  (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua,Ua")
	  (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,<thumb1_atomic_op_str>")))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_dup 1) (match_dup 2)
	   (match_operand:SI 3 "const_int_operand")]   ;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (<CODE>, NULL, operands[0], operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb,v8mb")])

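;; Usage sketch (illustrative comment only; the helper name is hypothetical):
;; when the new value is needed rather than the old one, the builtin maps
;; onto this "atomic_<op>_fetch" pattern.
;;
;;   int
;;   add_then_fetch (int *counter)
;;   {
;;     return __atomic_add_fetch (counter, 1, __ATOMIC_SEQ_CST);
;;   }
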
;; Constraints of this pattern must be at least as strict as those of the
;; non-atomic NANDs in thumb1.md and aim to be as permissive as possible.
(define_insn_and_split "atomic_nand_fetch<mode>"
  [(set (match_operand:QHSD 0 "s_register_operand" "=&r,&l")
	(not:QHSD
	  (and:QHSD
	    (match_operand:QHSD 1 "mem_noofs_operand" "+Ua,Ua")
	    (match_operand:QHSD 2 "<atomic_op_operand>" "<atomic_op_str>,l"))))
   (set (match_dup 1)
	(unspec_volatile:QHSD
	  [(match_dup 1) (match_dup 2)
	   (match_operand:SI 3 "const_int_operand")]   ;; model
	  VUNSPEC_ATOMIC_OP))
   (clobber (reg:CC CC_REGNUM))
   (clobber (match_scratch:SI 4 "=&r,&l"))]
  "<sync_predtab>"
  "#"
  "&& reload_completed"
  [(const_int 0)]
  {
    arm_split_atomic_op (NOT, NULL, operands[0], operands[1],
			 operands[2], operands[3], operands[4]);
    DONE;
  }
  [(set_attr "arch" "32,v8mb")])

(define_insn "arm_load_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(zero_extend:SI
	  (unspec_volatile:NARROW
	    [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
	    VUNSPEC_LL)))]
  "TARGET_HAVE_LDREXBH"
  "@
   ldrex<sync_sfx>%?\t%0, %C1
   ldrex<sync_sfx>\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_load_acquire_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(zero_extend:SI
	  (unspec_volatile:NARROW
	    [(match_operand:NARROW 1 "mem_noofs_operand" "Ua,Ua")]
	    VUNSPEC_LAX)))]
  "TARGET_HAVE_LDACQ"
  "@
   ldaex<sync_sfx>%?\\t%0, %C1
   ldaex<sync_sfx>\\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_load_exclusivesi"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
	  VUNSPEC_LL))]
  "TARGET_HAVE_LDREX"
  "@
   ldrex%?\t%0, %C1
   ldrex\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_load_acquire_exclusivesi"
  [(set (match_operand:SI 0 "s_register_operand" "=r,r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "mem_noofs_operand" "Ua,Ua")]
	  VUNSPEC_LAX))]
  "TARGET_HAVE_LDACQ"
  "@
   ldaex%?\t%0, %C1
   ldaex\t%0, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_load_exclusivedi"
  [(set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
	  VUNSPEC_LL))]
  "TARGET_HAVE_LDREXD"
  "ldrexd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_load_acquire_exclusivedi"
  [(set (match_operand:DI 0 "s_register_operand" "=r")
	(unspec_volatile:DI
	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
	  VUNSPEC_LAX))]
  "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
  "ldaexd%?\t%0, %H0, %C1"
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_store_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SC))
   (set (match_operand:QHSD 1 "mem_noofs_operand" "=Ua")
	(unspec_volatile:QHSD
	  [(match_operand:QHSD 2 "s_register_operand" "r")]
	  VUNSPEC_SC))]
  "<sync_predtab>"
  {
    if (<MODE>mode == DImode)
      {
	/* The restrictions on target registers in ARM mode are that the two
	   registers are consecutive and the first one is even; Thumb is
	   actually more flexible, but DI should give us this anyway.
	   Note that the first register always gets the lowest word in
	   memory.  */
	gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
	return "strexd%?\t%0, %2, %H2, %C1";
      }
    if (TARGET_THUMB1)
      return "strex<sync_sfx>\t%0, %2, %C1";
    else
      return "strex<sync_sfx>%?\t%0, %2, %C1";
  }
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_store_release_exclusivedi"
  [(set (match_operand:SI 0 "s_register_operand" "=&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
   (set (match_operand:DI 1 "mem_noofs_operand" "=Ua")
	(unspec_volatile:DI
	  [(match_operand:DI 2 "s_register_operand" "r")]
	  VUNSPEC_SLX))]
  "TARGET_HAVE_LDACQEXD && ARM_DOUBLEWORD_ALIGN"
  {
    /* See comment in arm_store_exclusive<mode> above.  */
    gcc_assert ((REGNO (operands[2]) & 1) == 0 || TARGET_THUMB2);
    return "stlexd%?\t%0, %2, %H2, %C1";
  }
  [(set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])

(define_insn "arm_store_release_exclusive<mode>"
  [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
	(unspec_volatile:SI [(const_int 0)] VUNSPEC_SLX))
   (set (match_operand:QHSI 1 "mem_noofs_operand" "=Ua,Ua")
	(unspec_volatile:QHSI
	  [(match_operand:QHSI 2 "s_register_operand" "r,r")]
	  VUNSPEC_SLX))]
  "TARGET_HAVE_LDACQ"
  "@
   stlex<sync_sfx>%?\t%0, %2, %C1
   stlex<sync_sfx>\t%0, %2, %C1"
  [(set_attr "arch" "32,v8mb")
   (set_attr "predicable" "yes")
   (set_attr "predicable_short_it" "no")])