Created from https://github.com/riscv/riscv-gnu-toolchain,
commit ddce5d17f14831f4957e57c415aca77817c2a82c

4diff -urN original-gcc/config.sub gcc/config.sub
5--- original-gcc/config.sub 2013-10-01 18:50:56.000000000 +0200
6+++ gcc-4.9.2/config.sub 2015-03-07 09:57:54.195132741 +0100
7@@ -334,6 +334,9 @@
8 ms1)
9 basic_machine=mt-unknown
10 ;;
11+ riscv)
12+ basic_machine=riscv-ucb
13+ ;;
14
15 strongarm | thumb | xscale)
16 basic_machine=arm-unknown
17diff -urN original-gcc/gcc/common/config/riscv/riscv-common.c gcc/gcc/common/config/riscv/riscv-common.c
18--- original-gcc/gcc/common/config/riscv/riscv-common.c 1970-01-01 01:00:00.000000000 +0100
19+++ gcc-4.9.2/gcc/common/config/riscv/riscv-common.c 2015-03-07 09:51:45.663139025 +0100
20@@ -0,0 +1,128 @@
21+/* Common hooks for RISC-V.
22+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
23+
24+This file is part of GCC.
25+
26+GCC is free software; you can redistribute it and/or modify
27+it under the terms of the GNU General Public License as published by
28+the Free Software Foundation; either version 3, or (at your option)
29+any later version.
30+
31+GCC is distributed in the hope that it will be useful,
32+but WITHOUT ANY WARRANTY; without even the implied warranty of
33+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34+GNU General Public License for more details.
35+
36+You should have received a copy of the GNU General Public License
37+along with GCC; see the file COPYING3. If not see
38+<http://www.gnu.org/licenses/>. */
39+
40+#include "config.h"
41+#include "system.h"
42+#include "coretypes.h"
43+#include "tm.h"
44+#include "common/common-target.h"
45+#include "common/common-target-def.h"
46+#include "opts.h"
47+#include "flags.h"
48+#include "errors.h"
49+
50+/* Parse a RISC-V ISA string into an option mask. */
51+
52+static void
53+riscv_parse_arch_string (const char *isa, int *flags)
54+{
55+ const char *p = isa;
56+
57+ if (strncmp (p, "RV32", 4) == 0)
58+ *flags |= MASK_32BIT, p += 4;
59+ else if (strncmp (p, "RV64", 4) == 0)
60+ *flags &= ~MASK_32BIT, p += 4;
61+
62+ if (*p++ != 'I')
63+ {
64+ error ("-march=%s: ISA strings must begin with I, RV32I, or RV64I", isa);
65+ return;
66+ }
67+
68+ *flags &= ~MASK_MULDIV;
69+ if (*p == 'M')
70+ *flags |= MASK_MULDIV, p++;
71+
72+ *flags &= ~MASK_ATOMIC;
73+ if (*p == 'A')
74+ *flags |= MASK_ATOMIC, p++;
75+
76+ *flags |= MASK_SOFT_FLOAT_ABI;
77+ if (*p == 'F')
78+ *flags &= ~MASK_SOFT_FLOAT_ABI, p++;
79+
80+ if (*p == 'D')
81+ {
82+ p++;
83+ if (!TARGET_HARD_FLOAT)
84+ {
85+ error ("-march=%s: the D extension requires the F extension", isa);
86+ return;
87+ }
88+ }
89+ else if (TARGET_HARD_FLOAT)
90+ {
91+ error ("-march=%s: single-precision-only is not yet supported", isa);
92+ return;
93+ }
94+
95+ if (*p)
96+ {
97+ error ("-march=%s: unsupported ISA substring %s", isa, p);
98+ return;
99+ }
100+}
101+
102+static int
103+riscv_flags_from_arch_string (const char *isa)
104+{
105+ int flags = 0;
106+ riscv_parse_arch_string (isa, &flags);
107+ return flags;
108+}
109+
110+/* Implement TARGET_HANDLE_OPTION. */
111+
112+static bool
113+riscv_handle_option (struct gcc_options *opts,
114+ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
115+ const struct cl_decoded_option *decoded,
116+ location_t loc ATTRIBUTE_UNUSED)
117+{
118+ switch (decoded->opt_index)
119+ {
120+ case OPT_march_:
121+ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
122+ return true;
123+
124+ default:
125+ return true;
126+ }
127+}
128+
129+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
130+static const struct default_options riscv_option_optimization_table[] =
131+ {
132+ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
133+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
134+ { OPT_LEVELS_NONE, 0, NULL, 0 }
135+ };
136+
137+#undef TARGET_OPTION_OPTIMIZATION_TABLE
138+#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
139+
140+#undef TARGET_DEFAULT_TARGET_FLAGS
141+#define TARGET_DEFAULT_TARGET_FLAGS \
142+ (riscv_flags_from_arch_string (RISCV_ARCH_STRING_DEFAULT) \
143+ | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
144+
145+#undef TARGET_HANDLE_OPTION
146+#define TARGET_HANDLE_OPTION riscv_handle_option
147+
148+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
149diff -urN original-gcc/gcc/config/riscv/constraints.md gcc/gcc/config/riscv/constraints.md
150--- original-gcc/gcc/config/riscv/constraints.md 1970-01-01 01:00:00.000000000 +0100
151+++ gcc-4.9.2/gcc/config/riscv/constraints.md 2015-03-07 09:51:45.663139025 +0100
152@@ -0,0 +1,90 @@
153+;; Constraint definitions for RISC-V target.
154+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
155+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
156+;; Based on MIPS target for GNU compiler.
157+;;
158+;; This file is part of GCC.
159+;;
160+;; GCC is free software; you can redistribute it and/or modify
161+;; it under the terms of the GNU General Public License as published by
162+;; the Free Software Foundation; either version 3, or (at your option)
163+;; any later version.
164+;;
165+;; GCC is distributed in the hope that it will be useful,
166+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
167+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
168+;; GNU General Public License for more details.
169+;;
170+;; You should have received a copy of the GNU General Public License
171+;; along with GCC; see the file COPYING3. If not see
172+;; <http://www.gnu.org/licenses/>.
173+
174+;; Register constraints
175+
176+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
177+ "A floating-point register (if available).")
178+
179+(define_register_constraint "b" "ALL_REGS"
180+ "@internal")
181+
182+(define_register_constraint "j" "T_REGS"
183+ "@internal")
184+
185+;; Integer constraints
186+
187+(define_constraint "Z"
188+ "@internal"
189+ (and (match_code "const_int")
190+ (match_test "1")))
191+
192+(define_constraint "I"
193+ "An I-type 12-bit signed immediate."
194+ (and (match_code "const_int")
195+ (match_test "SMALL_OPERAND (ival)")))
196+
197+(define_constraint "J"
198+ "Integer zero."
199+ (and (match_code "const_int")
200+ (match_test "ival == 0")))
201+
202+;; Floating-point constraints
203+
204+(define_constraint "G"
205+ "Floating-point zero."
206+ (and (match_code "const_double")
207+ (match_test "op == CONST0_RTX (mode)")))
208+
209+;; General constraints
210+
211+(define_constraint "Q"
212+ "@internal"
213+ (match_operand 0 "const_arith_operand"))
214+
215+(define_memory_constraint "A"
216+ "An address that is held in a general-purpose register."
217+ (and (match_code "mem")
218+ (match_test "GET_CODE(XEXP(op,0)) == REG")))
219+
220+(define_constraint "S"
221+ "@internal
222+ A constant call address."
223+ (and (match_operand 0 "call_insn_operand")
224+ (match_test "CONSTANT_P (op)")))
225+
226+(define_constraint "T"
227+ "@internal
228+ A constant @code{move_operand}."
229+ (and (match_operand 0 "move_operand")
230+ (match_test "CONSTANT_P (op)")))
231+
232+(define_memory_constraint "W"
233+ "@internal
234+ A memory address based on a member of @code{BASE_REG_CLASS}."
235+ (and (match_code "mem")
236+ (match_operand 0 "memory_operand")))
237+
238+(define_constraint "YG"
239+ "@internal
240+ A vector zero."
241+ (and (match_code "const_vector")
242+ (match_test "op == CONST0_RTX (mode)")))
243diff -urN original-gcc/gcc/config/riscv/default-32.h gcc/gcc/config/riscv/default-32.h
244--- original-gcc/gcc/config/riscv/default-32.h 1970-01-01 01:00:00.000000000 +0100
245+++ gcc-4.9.2/gcc/config/riscv/default-32.h 2015-03-07 09:51:45.663139025 +0100
246@@ -0,0 +1,22 @@
247+/* Definitions of target machine for GCC, for RISC-V,
248+ defaulting to 32-bit code generation.
249+
250+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
251+
252+This file is part of GCC.
253+
254+GCC is free software; you can redistribute it and/or modify
255+it under the terms of the GNU General Public License as published by
256+the Free Software Foundation; either version 3, or (at your option)
257+any later version.
258+
259+GCC is distributed in the hope that it will be useful,
260+but WITHOUT ANY WARRANTY; without even the implied warranty of
261+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
262+GNU General Public License for more details.
263+
264+You should have received a copy of the GNU General Public License
265+along with GCC; see the file COPYING3. If not see
266+<http://www.gnu.org/licenses/>. */
267+
268+#define TARGET_64BIT_DEFAULT 0
269diff -urN original-gcc/gcc/config/riscv/elf.h gcc/gcc/config/riscv/elf.h
270--- original-gcc/gcc/config/riscv/elf.h 1970-01-01 01:00:00.000000000 +0100
271+++ gcc-4.9.2/gcc/config/riscv/elf.h 2015-03-07 09:51:45.663139025 +0100
272@@ -0,0 +1,31 @@
273+/* Target macros for riscv*-elf targets.
274+ Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
275+ Free Software Foundation, Inc.
276+
277+This file is part of GCC.
278+
279+GCC is free software; you can redistribute it and/or modify
280+it under the terms of the GNU General Public License as published by
281+the Free Software Foundation; either version 3, or (at your option)
282+any later version.
283+
284+GCC is distributed in the hope that it will be useful,
285+but WITHOUT ANY WARRANTY; without even the implied warranty of
286+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
287+GNU General Public License for more details.
288+
289+You should have received a copy of the GNU General Public License
290+along with GCC; see the file COPYING3. If not see
291+<http://www.gnu.org/licenses/>. */
292+
293+/* Leave the linker script to choose the appropriate libraries. */
294+#undef LIB_SPEC
295+#define LIB_SPEC ""
296+
297+#undef STARTFILE_SPEC
298+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
299+
300+#undef ENDFILE_SPEC
301+#define ENDFILE_SPEC "crtend%O%s"
302+
303+#define NO_IMPLICIT_EXTERN_C 1
304diff -urN original-gcc/gcc/config/riscv/generic.md gcc/gcc/config/riscv/generic.md
305--- original-gcc/gcc/config/riscv/generic.md 1970-01-01 01:00:00.000000000 +0100
306+++ gcc-4.9.2/gcc/config/riscv/generic.md 2015-03-07 09:51:45.663139025 +0100
307@@ -0,0 +1,98 @@
308+;; Generic DFA-based pipeline description for RISC-V targets.
309+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
310+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
311+;; Based on MIPS target for GNU compiler.
312+
313+;; This file is part of GCC.
314+
315+;; GCC is free software; you can redistribute it and/or modify it
316+;; under the terms of the GNU General Public License as published
317+;; by the Free Software Foundation; either version 3, or (at your
318+;; option) any later version.
319+
320+;; GCC is distributed in the hope that it will be useful, but WITHOUT
321+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
322+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
323+;; License for more details.
324+
325+;; You should have received a copy of the GNU General Public License
326+;; along with GCC; see the file COPYING3. If not see
327+;; <http://www.gnu.org/licenses/>.
328+
329+
330+;; This file is derived from the old define_function_unit description.
331+;; Each reservation can be overridden on a processor-by-processor basis.
332+
333+(define_insn_reservation "generic_alu" 1
334+ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
335+ "alu")
336+
337+(define_insn_reservation "generic_load" 3
338+ (eq_attr "type" "load,fpload,fpidxload")
339+ "alu")
340+
341+(define_insn_reservation "generic_store" 1
342+ (eq_attr "type" "store,fpstore,fpidxstore")
343+ "alu")
344+
345+(define_insn_reservation "generic_xfer" 2
346+ (eq_attr "type" "mfc,mtc")
347+ "alu")
348+
349+(define_insn_reservation "generic_branch" 1
350+ (eq_attr "type" "branch,jump,call")
351+ "alu")
352+
353+(define_insn_reservation "generic_imul" 17
354+ (eq_attr "type" "imul")
355+ "imuldiv*17")
356+
357+(define_insn_reservation "generic_idiv" 38
358+ (eq_attr "type" "idiv")
359+ "imuldiv*38")
360+
361+(define_insn_reservation "generic_fcvt" 1
362+ (eq_attr "type" "fcvt")
363+ "alu")
364+
365+(define_insn_reservation "generic_fmove" 2
366+ (eq_attr "type" "fmove")
367+ "alu")
368+
369+(define_insn_reservation "generic_fcmp" 3
370+ (eq_attr "type" "fcmp")
371+ "alu")
372+
373+(define_insn_reservation "generic_fadd" 4
374+ (eq_attr "type" "fadd")
375+ "alu")
376+
377+(define_insn_reservation "generic_fmul_single" 7
378+ (and (eq_attr "type" "fmul,fmadd")
379+ (eq_attr "mode" "SF"))
380+ "alu")
381+
382+(define_insn_reservation "generic_fmul_double" 8
383+ (and (eq_attr "type" "fmul,fmadd")
384+ (eq_attr "mode" "DF"))
385+ "alu")
386+
387+(define_insn_reservation "generic_fdiv_single" 23
388+ (and (eq_attr "type" "fdiv")
389+ (eq_attr "mode" "SF"))
390+ "alu")
391+
392+(define_insn_reservation "generic_fdiv_double" 36
393+ (and (eq_attr "type" "fdiv")
394+ (eq_attr "mode" "DF"))
395+ "alu")
396+
397+(define_insn_reservation "generic_fsqrt_single" 54
398+ (and (eq_attr "type" "fsqrt")
399+ (eq_attr "mode" "SF"))
400+ "alu")
401+
402+(define_insn_reservation "generic_fsqrt_double" 112
403+ (and (eq_attr "type" "fsqrt")
404+ (eq_attr "mode" "DF"))
405+ "alu")
406diff -urN original-gcc/gcc/config/riscv/linux64.h gcc/gcc/config/riscv/linux64.h
407--- original-gcc/gcc/config/riscv/linux64.h 1970-01-01 01:00:00.000000000 +0100
408+++ gcc-4.9.2/gcc/config/riscv/linux64.h 2015-03-07 09:51:45.663139025 +0100
409@@ -0,0 +1,43 @@
410+/* Definitions for 64-bit RISC-V GNU/Linux systems with ELF format.
411+ Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
412+ Free Software Foundation, Inc.
413+
414+This file is part of GCC.
415+
416+GCC is free software; you can redistribute it and/or modify
417+it under the terms of the GNU General Public License as published by
418+the Free Software Foundation; either version 3, or (at your option)
419+any later version.
420+
421+GCC is distributed in the hope that it will be useful,
422+but WITHOUT ANY WARRANTY; without even the implied warranty of
423+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
424+GNU General Public License for more details.
425+
426+You should have received a copy of the GNU General Public License
427+along with GCC; see the file COPYING3. If not see
428+<http://www.gnu.org/licenses/>. */
429+
430+/* Force the default ABI flags onto the command line
431+ in order to make the other specs easier to write. */
432+#undef LIB_SPEC
433+#define LIB_SPEC "\
434+%{pthread:-lpthread} \
435+%{shared:-lc} \
436+%{!shared: \
437+ %{profile:-lc_p} %{!profile:-lc}}"
438+
439+#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
440+#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
441+
442+#undef LINK_SPEC
443+#define LINK_SPEC "\
444+%{shared} \
445+ %{!shared: \
446+ %{!static: \
447+ %{rdynamic:-export-dynamic} \
448+ %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
449+ %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
450+ %{static:-static}} \
451+%{" OPT_ARCH64 ":-melf64lriscv} \
452+%{" OPT_ARCH32 ":-melf32lriscv}"
453diff -urN original-gcc/gcc/config/riscv/linux.h gcc/gcc/config/riscv/linux.h
454--- original-gcc/gcc/config/riscv/linux.h 1970-01-01 01:00:00.000000000 +0100
455+++ gcc-4.9.2/gcc/config/riscv/linux.h 2015-03-07 09:51:45.663139025 +0100
456@@ -0,0 +1,60 @@
457+/* Definitions for RISC-V GNU/Linux systems with ELF format.
458+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
459+ 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
460+
461+This file is part of GCC.
462+
463+GCC is free software; you can redistribute it and/or modify
464+it under the terms of the GNU General Public License as published by
465+the Free Software Foundation; either version 3, or (at your option)
466+any later version.
467+
468+GCC is distributed in the hope that it will be useful,
469+but WITHOUT ANY WARRANTY; without even the implied warranty of
470+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
471+GNU General Public License for more details.
472+
473+You should have received a copy of the GNU General Public License
474+along with GCC; see the file COPYING3. If not see
475+<http://www.gnu.org/licenses/>. */
476+
477+#undef WCHAR_TYPE
478+#define WCHAR_TYPE "int"
479+
480+#undef WCHAR_TYPE_SIZE
481+#define WCHAR_TYPE_SIZE 32
482+
483+#define TARGET_OS_CPP_BUILTINS() \
484+ do { \
485+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
486+ /* The GNU C++ standard library requires this. */ \
487+ if (c_dialect_cxx ()) \
488+ builtin_define ("_GNU_SOURCE"); \
489+ } while (0)
490+
491+#undef SUBTARGET_CPP_SPEC
492+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
493+
494+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
495+
496+/* Borrowed from sparc/linux.h */
497+#undef LINK_SPEC
498+#define LINK_SPEC \
499+ "%{shared:-shared} \
500+ %{!shared: \
501+ %{!static: \
502+ %{rdynamic:-export-dynamic} \
503+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
504+ %{static:-static}}"
505+
506+#undef LIB_SPEC
507+#define LIB_SPEC "\
508+%{pthread:-lpthread} \
509+%{shared:-lc} \
510+%{!shared: \
511+ %{profile:-lc_p} %{!profile:-lc}}"
512+
513+/* Similar to standard Linux, but adding -ffast-math support. */
514+#undef ENDFILE_SPEC
515+#define ENDFILE_SPEC \
516+ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
517diff -urN original-gcc/gcc/config/riscv/opcode-riscv.h gcc/gcc/config/riscv/opcode-riscv.h
518--- original-gcc/gcc/config/riscv/opcode-riscv.h 1970-01-01 01:00:00.000000000 +0100
519+++ gcc-4.9.2/gcc/config/riscv/opcode-riscv.h 2015-03-07 09:51:45.663139025 +0100
520@@ -0,0 +1,149 @@
521+/* RISC-V ISA encoding.
522+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
523+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
524+ Based on MIPS target for GNU compiler.
525+
526+This file is part of GDB, GAS, and the GNU binutils.
527+
528+GDB, GAS, and the GNU binutils are free software; you can redistribute
529+them and/or modify them under the terms of the GNU General Public
530+License as published by the Free Software Foundation; either version
531+1, or (at your option) any later version.
532+
533+GDB, GAS, and the GNU binutils are distributed in the hope that they
534+will be useful, but WITHOUT ANY WARRANTY; without even the implied
535+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
536+the GNU General Public License for more details.
537+
538+You should have received a copy of the GNU General Public License
539+along with this file; see the file COPYING. If not, write to the Free
540+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
541+
542+#ifndef _RISCV_H_
543+#define _RISCV_H_
544+
545+#define RV_X(x, s, n) (((x) >> (s)) & ((1<<(n))-1))
546+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
547+
548+#define EXTRACT_ITYPE_IMM(x) \
549+ (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
550+#define EXTRACT_STYPE_IMM(x) \
551+ (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
552+#define EXTRACT_SBTYPE_IMM(x) \
553+ ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
554+#define EXTRACT_UTYPE_IMM(x) \
555+ ((RV_X(x, 12, 20) << 20) | (RV_IMM_SIGN(x) << 32))
556+#define EXTRACT_UJTYPE_IMM(x) \
557+ ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
558+
559+#define ENCODE_ITYPE_IMM(x) \
560+ (RV_X(x, 0, 12) << 20)
561+#define ENCODE_STYPE_IMM(x) \
562+ ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
563+#define ENCODE_SBTYPE_IMM(x) \
564+ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
565+#define ENCODE_UTYPE_IMM(x) \
566+ (RV_X(x, 12, 20) << 12)
567+#define ENCODE_UJTYPE_IMM(x) \
568+ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
569+
570+#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
571+#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
572+#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
573+#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
574+#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
575+
576+#define RISCV_RTYPE(insn, rd, rs1, rs2) \
577+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
578+#define RISCV_ITYPE(insn, rd, rs1, imm) \
579+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
580+#define RISCV_STYPE(insn, rs1, rs2, imm) \
581+ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
582+#define RISCV_SBTYPE(insn, rs1, rs2, target) \
583+ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
584+#define RISCV_UTYPE(insn, rd, bigimm) \
585+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
586+#define RISCV_UJTYPE(insn, rd, target) \
587+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
588+
589+#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
590+
591+#define RISCV_CONST_HIGH_PART(VALUE) \
592+ (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
593+#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
594+
595+/* RV fields */
596+
597+#define OP_MASK_OP 0x7f
598+#define OP_SH_OP 0
599+#define OP_MASK_RS2 0x1f
600+#define OP_SH_RS2 20
601+#define OP_MASK_RS1 0x1f
602+#define OP_SH_RS1 15
603+#define OP_MASK_RS3 0x1f
604+#define OP_SH_RS3 27
605+#define OP_MASK_RD 0x1f
606+#define OP_SH_RD 7
607+#define OP_MASK_SHAMT 0x3f
608+#define OP_SH_SHAMT 20
609+#define OP_MASK_SHAMTW 0x1f
610+#define OP_SH_SHAMTW 20
611+#define OP_MASK_RM 0x7
612+#define OP_SH_RM 12
613+#define OP_MASK_PRED 0xf
614+#define OP_SH_PRED 24
615+#define OP_MASK_SUCC 0xf
616+#define OP_SH_SUCC 20
617+#define OP_MASK_AQ 0x1
618+#define OP_SH_AQ 26
619+#define OP_MASK_RL 0x1
620+#define OP_SH_RL 25
621+
622+#define OP_MASK_VRD 0x1f
623+#define OP_SH_VRD 7
624+#define OP_MASK_VRS 0x1f
625+#define OP_SH_VRS 15
626+#define OP_MASK_VRT 0x1f
627+#define OP_SH_VRT 20
628+#define OP_MASK_VRR 0x1f
629+#define OP_SH_VRR 25
630+
631+#define OP_MASK_VFD 0x1f
632+#define OP_SH_VFD 7
633+#define OP_MASK_VFS 0x1f
634+#define OP_SH_VFS 15
635+#define OP_MASK_VFT 0x1f
636+#define OP_SH_VFT 20
637+#define OP_MASK_VFR 0x1f
638+#define OP_SH_VFR 25
639+
640+#define OP_MASK_IMMNGPR 0x3f
641+#define OP_SH_IMMNGPR 20
642+#define OP_MASK_IMMNFPR 0x3f
643+#define OP_SH_IMMNFPR 26
644+#define OP_MASK_IMMSEGNELM 0x1f
645+#define OP_SH_IMMSEGNELM 17
646+#define OP_MASK_IMMSEGSTNELM 0x1f
647+#define OP_SH_IMMSEGSTNELM 12
648+#define OP_MASK_CUSTOM_IMM 0x7f
649+#define OP_SH_CUSTOM_IMM 25
650+
651+#define LINK_REG 1
652+
653+#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
654+#define RISCV_JUMP_ALIGN_BITS 1
655+#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
656+#define RISCV_JUMP_REACH ((1ULL<<RISCV_JUMP_BITS)*RISCV_JUMP_ALIGN)
657+
658+#define RISCV_IMM_BITS 12
659+#define RISCV_BIGIMM_BITS (32-RISCV_IMM_BITS)
660+#define RISCV_IMM_REACH (1LL<<RISCV_IMM_BITS)
661+#define RISCV_BIGIMM_REACH (1LL<<RISCV_BIGIMM_BITS)
662+#define RISCV_BRANCH_BITS RISCV_IMM_BITS
663+#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS
664+#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS)
665+#define RISCV_BRANCH_REACH (RISCV_IMM_REACH*RISCV_BRANCH_ALIGN)
666+
667+#include "riscv-opc.h"
668+
669+#endif /* _RISCV_H_ */
670diff -urN original-gcc/gcc/config/riscv/peephole.md gcc/gcc/config/riscv/peephole.md
671--- original-gcc/gcc/config/riscv/peephole.md 1970-01-01 01:00:00.000000000 +0100
672+++ gcc-4.9.2/gcc/config/riscv/peephole.md 2015-03-07 09:51:45.663139025 +0100
673@@ -0,0 +1,100 @@
674+;;........................
675+;; DI -> SI optimizations
676+;;........................
677+
678+;; Simplify (int)(a + 1), etc.
679+(define_peephole2
680+ [(set (match_operand:DI 0 "register_operand")
681+ (match_operator:DI 4 "modular_operator"
682+ [(match_operand:DI 1 "register_operand")
683+ (match_operand:DI 2 "arith_operand")]))
684+ (set (match_operand:SI 3 "register_operand")
685+ (truncate:SI (match_dup 0)))]
686+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
687+ && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
688+ [(set (match_dup 3)
689+ (truncate:SI
690+ (match_op_dup:DI 4
691+ [(match_operand:DI 1 "register_operand")
692+ (match_operand:DI 2 "arith_operand")])))])
693+
694+;; Simplify (int)a + 1, etc.
695+(define_peephole2
696+ [(set (match_operand:SI 0 "register_operand")
697+ (truncate:SI (match_operand:DI 1 "register_operand")))
698+ (set (match_operand:SI 3 "register_operand")
699+ (match_operator:SI 4 "modular_operator"
700+ [(match_dup 0)
701+ (match_operand:SI 2 "arith_operand")]))]
702+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
703+ [(set (match_dup 3)
704+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
705+
706+;; Simplify -(int)a, etc.
707+(define_peephole2
708+ [(set (match_operand:SI 0 "register_operand")
709+ (truncate:SI (match_operand:DI 2 "register_operand")))
710+ (set (match_operand:SI 3 "register_operand")
711+ (match_operator:SI 4 "modular_operator"
712+ [(match_operand:SI 1 "reg_or_0_operand")
713+ (match_dup 0)]))]
714+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
715+ [(set (match_dup 3)
716+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
717+
718+;; Simplify PIC loads to static variables.
719+;; These will go away once we figure out how to emit auipc discretely.
720+(define_insn "*local_pic_load<mode>"
721+ [(set (match_operand:ANYI 0 "register_operand" "=r")
722+ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
723+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
724+ "<load>\t%0,%1"
725+ [(set (attr "length") (const_int 8))])
726+(define_insn "*local_pic_load<mode>"
727+ [(set (match_operand:ANYF 0 "register_operand" "=f")
728+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
729+ (clobber (match_scratch:DI 2 "=&r"))]
730+ "TARGET_HARD_FLOAT && TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
731+ "<load>\t%0,%1,%2"
732+ [(set (attr "length") (const_int 8))])
733+(define_insn "*local_pic_load<mode>"
734+ [(set (match_operand:ANYF 0 "register_operand" "=f")
735+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
736+ (clobber (match_scratch:SI 2 "=&r"))]
737+ "TARGET_HARD_FLOAT && !TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
738+ "<load>\t%0,%1,%2"
739+ [(set (attr "length") (const_int 8))])
740+(define_insn "*local_pic_loadu<mode>"
741+ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
742+ (zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
743+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
744+ "<load>u\t%0,%1"
745+ [(set (attr "length") (const_int 8))])
746+(define_insn "*local_pic_storedi<mode>"
747+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
748+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
749+ (clobber (match_scratch:DI 2 "=&r"))]
750+ "TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
751+ "<store>\t%z1,%0,%2"
752+ [(set (attr "length") (const_int 8))])
753+(define_insn "*local_pic_storesi<mode>"
754+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
755+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
756+ (clobber (match_scratch:SI 2 "=&r"))]
757+ "!TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
758+ "<store>\t%z1,%0,%2"
759+ [(set (attr "length") (const_int 8))])
760+(define_insn "*local_pic_storedi<mode>"
761+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
762+ (match_operand:ANYF 1 "register_operand" "f"))
763+ (clobber (match_scratch:DI 2 "=&r"))]
764+ "TARGET_HARD_FLOAT && TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
765+ "<store>\t%1,%0,%2"
766+ [(set (attr "length") (const_int 8))])
767+(define_insn "*local_pic_storesi<mode>"
768+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
769+ (match_operand:ANYF 1 "register_operand" "f"))
770+ (clobber (match_scratch:SI 2 "=&r"))]
771+ "TARGET_HARD_FLOAT && !TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
772+ "<store>\t%1,%0,%2"
773+ [(set (attr "length") (const_int 8))])
774diff -urN original-gcc/gcc/config/riscv/predicates.md gcc/gcc/config/riscv/predicates.md
775--- original-gcc/gcc/config/riscv/predicates.md 1970-01-01 01:00:00.000000000 +0100
776+++ gcc-4.9.2/gcc/config/riscv/predicates.md 2015-03-07 09:51:45.663139025 +0100
777@@ -0,0 +1,182 @@
778+;; Predicate description for RISC-V target.
779+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
780+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
781+;; Based on MIPS target for GNU compiler.
782+;;
783+;; This file is part of GCC.
784+;;
785+;; GCC is free software; you can redistribute it and/or modify
786+;; it under the terms of the GNU General Public License as published by
787+;; the Free Software Foundation; either version 3, or (at your option)
788+;; any later version.
789+;;
790+;; GCC is distributed in the hope that it will be useful,
791+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
792+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
793+;; GNU General Public License for more details.
794+;;
795+;; You should have received a copy of the GNU General Public License
796+;; along with GCC; see the file COPYING3. If not see
797+;; <http://www.gnu.org/licenses/>.
798+
799+(define_predicate "const_arith_operand"
800+ (and (match_code "const_int")
801+ (match_test "SMALL_OPERAND (INTVAL (op))")))
802+
803+(define_predicate "arith_operand"
804+ (ior (match_operand 0 "const_arith_operand")
805+ (match_operand 0 "register_operand")))
806+
807+(define_predicate "sle_operand"
808+ (and (match_code "const_int")
809+ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
810+
811+(define_predicate "sleu_operand"
812+ (and (match_operand 0 "sle_operand")
813+ (match_test "INTVAL (op) + 1 != 0")))
814+
815+(define_predicate "const_0_operand"
816+ (and (match_code "const_int,const_double,const_vector")
817+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
818+
819+(define_predicate "reg_or_0_operand"
820+ (ior (match_operand 0 "const_0_operand")
821+ (match_operand 0 "register_operand")))
822+
823+(define_predicate "const_1_operand"
824+ (and (match_code "const_int,const_double,const_vector")
825+ (match_test "op == CONST1_RTX (GET_MODE (op))")))
826+
827+(define_predicate "reg_or_1_operand"
828+ (ior (match_operand 0 "const_1_operand")
829+ (match_operand 0 "register_operand")))
830+
831+;; This is used for indexing into vectors, and hence only accepts const_int.
832+(define_predicate "const_0_or_1_operand"
833+ (and (match_code "const_int")
834+ (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
835+ (match_test "op == CONST1_RTX (GET_MODE (op))"))))
836+
837+(define_special_predicate "pc_or_label_operand"
838+ (match_code "pc,label_ref"))
839+
840+;; A legitimate CONST_INT operand that takes more than one instruction
841+;; to load.
842+(define_predicate "splittable_const_int_operand"
843+ (match_code "const_int")
844+{
845+ /* Don't handle multi-word moves this way; we don't want to introduce
846+ the individual word-mode moves until after reload. */
847+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
848+ return false;
849+
850+ /* Otherwise check whether the constant can be loaded in a single
851+ instruction. */
852+ return !LUI_INT (op) && !SMALL_INT (op);
853+})
854+
855+(define_predicate "move_operand"
856+ (match_operand 0 "general_operand")
857+{
858+ enum riscv_symbol_type symbol_type;
859+
860+ /* The thinking here is as follows:
861+
862+ (1) The move expanders should split complex load sequences into
863+ individual instructions. Those individual instructions can
864+ then be optimized by all rtl passes.
865+
866+ (2) The target of pre-reload load sequences should not be used
867+ to store temporary results. If the target register is only
868+ assigned one value, reload can rematerialize that value
869+ on demand, rather than spill it to the stack.
870+
871+ (3) If we allowed pre-reload passes like combine and cse to recreate
872+ complex load sequences, we would want to be able to split the
873+ sequences before reload as well, so that the pre-reload scheduler
874+ can see the individual instructions. This falls foul of (2);
875+ the splitter would be forced to reuse the target register for
876+ intermediate results.
877+
878+ (4) We want to define complex load splitters for combine. These
879+ splitters can request a temporary scratch register, which avoids
880+ the problem in (2). They allow things like:
881+
882+ (set (reg T1) (high SYM))
883+ (set (reg T2) (low (reg T1) SYM))
884+ (set (reg X) (plus (reg T2) (const_int OFFSET)))
885+
886+ to be combined into:
887+
888+ (set (reg T3) (high SYM+OFFSET))
889+ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
890+
891+ if T2 is only used this once. */
892+ switch (GET_CODE (op))
893+ {
894+ case CONST_INT:
895+ return !splittable_const_int_operand (op, mode);
896+
897+ case CONST:
898+ case SYMBOL_REF:
899+ case LABEL_REF:
900+ return (riscv_symbolic_constant_p (op, &symbol_type)
901+ && !riscv_hi_relocs[symbol_type]);
902+
903+ case HIGH:
904+ op = XEXP (op, 0);
905+ return riscv_symbolic_constant_p (op, &symbol_type);
906+
907+ default:
908+ return true;
909+ }
910+})
911+
912+(define_predicate "consttable_operand"
913+ (match_test "CONSTANT_P (op)"))
914+
915+(define_predicate "symbolic_operand"
916+ (match_code "const,symbol_ref,label_ref")
917+{
918+ enum riscv_symbol_type type;
919+ return riscv_symbolic_constant_p (op, &type);
920+})
921+
922+(define_predicate "absolute_symbolic_operand"
923+ (match_code "const,symbol_ref,label_ref")
924+{
925+ enum riscv_symbol_type type;
926+ return (riscv_symbolic_constant_p (op, &type)
927+ && type == SYMBOL_ABSOLUTE);
928+})
929+
930+(define_predicate "plt_symbolic_operand"
931+ (match_code "const,symbol_ref,label_ref")
932+{
933+ enum riscv_symbol_type type;
934+ return (riscv_symbolic_constant_p (op, &type)
935+ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
936+})
937+
938+(define_predicate "call_insn_operand"
939+ (ior (match_operand 0 "absolute_symbolic_operand")
940+ (match_operand 0 "plt_symbolic_operand")
941+ (match_operand 0 "register_operand")))
942+
943+(define_predicate "symbol_ref_operand"
944+ (match_code "symbol_ref"))
945+
946+(define_predicate "modular_operator"
947+ (match_code "plus,minus,mult,ashift"))
948+
949+(define_predicate "equality_operator"
950+ (match_code "eq,ne"))
951+
952+(define_predicate "order_operator"
953+ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
954+
955+(define_predicate "fp_order_operator"
956+ (match_code "eq,lt,le,gt,ge"))
957+
958+(define_predicate "fp_unorder_operator"
959+ (match_code "ordered,unordered"))
960diff -urN original-gcc/gcc/config/riscv/riscv.c gcc/gcc/config/riscv/riscv.c
961--- original-gcc/gcc/config/riscv/riscv.c 1970-01-01 01:00:00.000000000 +0100
962+++ gcc-4.9.2/gcc/config/riscv/riscv.c 2015-03-07 09:51:45.667139025 +0100
963@@ -0,0 +1,4292 @@
964+/* Subroutines used for code generation for RISC-V.
965+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
966+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
967+ Based on MIPS target for GNU compiler.
968+
969+This file is part of GCC.
970+
971+GCC is free software; you can redistribute it and/or modify
972+it under the terms of the GNU General Public License as published by
973+the Free Software Foundation; either version 3, or (at your option)
974+any later version.
975+
976+GCC is distributed in the hope that it will be useful,
977+but WITHOUT ANY WARRANTY; without even the implied warranty of
978+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
979+GNU General Public License for more details.
980+
981+You should have received a copy of the GNU General Public License
982+along with GCC; see the file COPYING3. If not see
983+<http://www.gnu.org/licenses/>. */
984+
985+#include "config.h"
986+#include "system.h"
987+#include "coretypes.h"
988+#include "tm.h"
989+#include "rtl.h"
990+#include "regs.h"
991+#include "hard-reg-set.h"
992+#include "insn-config.h"
993+#include "conditions.h"
994+#include "insn-attr.h"
995+#include "recog.h"
996+#include "output.h"
997+#include "tree.h"
998+#include "varasm.h"
999+#include "stor-layout.h"
1000+#include "calls.h"
1001+#include "function.h"
1002+#include "expr.h"
1003+#include "optabs.h"
1004+#include "libfuncs.h"
1005+#include "flags.h"
1006+#include "reload.h"
1007+#include "tm_p.h"
1008+#include "ggc.h"
1009+#include "gstab.h"
1010+#include "hashtab.h"
1011+#include "debug.h"
1012+#include "target.h"
1013+#include "target-def.h"
1014+#include "langhooks.h"
1015+#include "sched-int.h"
1016+#include "bitmap.h"
1017+#include "diagnostic.h"
1018+#include "target-globals.h"
1019+#include "symcat.h"
1020+#include <stdint.h>
1021+
1022+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
1023+#define UNSPEC_ADDRESS_P(X) \
1024+ (GET_CODE (X) == UNSPEC \
1025+ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
1026+ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
1027+
1028+/* Extract the symbol or label from UNSPEC wrapper X. */
1029+#define UNSPEC_ADDRESS(X) \
1030+ XVECEXP (X, 0, 0)
1031+
1032+/* Extract the symbol type from UNSPEC wrapper X. */
1033+#define UNSPEC_ADDRESS_TYPE(X) \
1034+ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
1035+
1036+/* The maximum distance between the top of the stack frame and the
1037+ value sp has when we save and restore registers. This is set by the
1038+ range of load/store offsets and must also preserve stack alignment. */
1039+#define RISCV_MAX_FIRST_STACK_STEP (RISCV_IMM_REACH/2 - 16)
1040+
1041+/* True if INSN is a riscv.md pattern or asm statement. */
1042+#define USEFUL_INSN_P(INSN) \
1043+ (NONDEBUG_INSN_P (INSN) \
1044+ && GET_CODE (PATTERN (INSN)) != USE \
1045+ && GET_CODE (PATTERN (INSN)) != CLOBBER \
1046+ && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
1047+ && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
1048+
1049+/* True if bit BIT is set in VALUE. */
1050+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
1051+
1052+/* Classifies an address.
1053+
1054+ ADDRESS_REG
1055+ A natural register + offset address. The register satisfies
1056+ riscv_valid_base_register_p and the offset is a const_arith_operand.
1057+
1058+ ADDRESS_LO_SUM
1059+ A LO_SUM rtx. The first operand is a valid base register and
1060+ the second operand is a symbolic address.
1061+
1062+ ADDRESS_CONST_INT
1063+ A signed 16-bit constant address.
1064+
1065+ ADDRESS_SYMBOLIC:
1066+ A constant symbolic address. */
1067+enum riscv_address_type {
1068+ ADDRESS_REG,
1069+ ADDRESS_LO_SUM,
1070+ ADDRESS_CONST_INT,
1071+ ADDRESS_SYMBOLIC
1072+};
1073+
1074+/* Macros to create an enumeration identifier for a function prototype. */
1075+#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
1076+#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
1077+#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
1078+#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
1079+
1080+/* Classifies the prototype of a built-in function. */
1081+enum riscv_function_type {
1082+#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
1083+#include "config/riscv/riscv-ftypes.def"
1084+#undef DEF_RISCV_FTYPE
1085+ RISCV_MAX_FTYPE_MAX
1086+};
1087+
1088+/* Specifies how a built-in function should be converted into rtl. */
1089+enum riscv_builtin_type {
1090+ /* The function corresponds directly to an .md pattern. The return
1091+ value is mapped to operand 0 and the arguments are mapped to
1092+ operands 1 and above. */
1093+ RISCV_BUILTIN_DIRECT,
1094+
1095+ /* The function corresponds directly to an .md pattern. There is no return
1096+ value and the arguments are mapped to operands 0 and above. */
1097+ RISCV_BUILTIN_DIRECT_NO_TARGET
1098+};
1099+
1100+/* Information about a function's frame layout. */
1101+struct GTY(()) riscv_frame_info {
1102+ /* The size of the frame in bytes. */
1103+ HOST_WIDE_INT total_size;
1104+
1105+ /* Bit X is set if the function saves or restores GPR X. */
1106+ unsigned int mask;
1107+
1108+ /* Likewise FPR X. */
1109+ unsigned int fmask;
1110+
1111+ /* Offsets of fixed-point and floating-point save areas from frame bottom */
1112+ HOST_WIDE_INT gp_sp_offset;
1113+ HOST_WIDE_INT fp_sp_offset;
1114+
1115+ /* Offset of virtual frame pointer from stack pointer/frame bottom */
1116+ HOST_WIDE_INT frame_pointer_offset;
1117+
1118+ /* Offset of hard frame pointer from stack pointer/frame bottom */
1119+ HOST_WIDE_INT hard_frame_pointer_offset;
1120+
1121+ /* The offset of arg_pointer_rtx from the bottom of the frame. */
1122+ HOST_WIDE_INT arg_pointer_offset;
1123+};
1124+
1125+struct GTY(()) machine_function {
1126+ /* The number of extra stack bytes taken up by register varargs.
1127+ This area is allocated by the callee at the very top of the frame. */
1128+ int varargs_size;
1129+
1130+ /* The current frame information, calculated by riscv_compute_frame_info. */
1131+ struct riscv_frame_info frame;
1132+};
1133+
1134+/* Information about a single argument. */
1135+struct riscv_arg_info {
1136+ /* True if the argument is passed in a floating-point register, or
1137+ would have been if we hadn't run out of registers. */
1138+ bool fpr_p;
1139+
1140+ /* The number of words passed in registers, rounded up. */
1141+ unsigned int reg_words;
1142+
1143+ /* For EABI, the offset of the first register from GP_ARG_FIRST or
1144+ FP_ARG_FIRST. For other ABIs, the offset of the first register from
1145+ the start of the ABI's argument structure (see the CUMULATIVE_ARGS
1146+ comment for details).
1147+
1148+ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
1149+ on the stack. */
1150+ unsigned int reg_offset;
1151+
1152+ /* The number of words that must be passed on the stack, rounded up. */
1153+ unsigned int stack_words;
1154+
1155+ /* The offset from the start of the stack overflow area of the argument's
1156+ first stack word. Only meaningful when STACK_WORDS is nonzero. */
1157+ unsigned int stack_offset;
1158+};
1159+
1160+/* Information about an address described by riscv_address_type.
1161+
1162+ ADDRESS_CONST_INT
1163+ No fields are used.
1164+
1165+ ADDRESS_REG
1166+ REG is the base register and OFFSET is the constant offset.
1167+
1168+ ADDRESS_LO_SUM
1169+ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
1170+ is the type of symbol it references.
1171+
1172+ ADDRESS_SYMBOLIC
1173+ SYMBOL_TYPE is the type of symbol that the address references. */
1174+struct riscv_address_info {
1175+ enum riscv_address_type type;
1176+ rtx reg;
1177+ rtx offset;
1178+ enum riscv_symbol_type symbol_type;
1179+};
1180+
1181+/* One stage in a constant building sequence. These sequences have
1182+ the form:
1183+
1184+ A = VALUE[0]
1185+ A = A CODE[1] VALUE[1]
1186+ A = A CODE[2] VALUE[2]
1187+ ...
1188+
1189+ where A is an accumulator, each CODE[i] is a binary rtl operation
1190+ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
1191+struct riscv_integer_op {
1192+ enum rtx_code code;
1193+ unsigned HOST_WIDE_INT value;
1194+};
1195+
1196+/* The largest number of operations needed to load an integer constant.
1197+ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
1198+ but we may attempt and reject even worse sequences. */
1199+#define RISCV_MAX_INTEGER_OPS 32
1200+
1201+/* Costs of various operations on the different architectures. */
1202+
1203+struct riscv_tune_info
1204+{
1205+ unsigned short fp_add[2];
1206+ unsigned short fp_mul[2];
1207+ unsigned short fp_div[2];
1208+ unsigned short int_mul[2];
1209+ unsigned short int_div[2];
1210+ unsigned short issue_rate;
1211+ unsigned short branch_cost;
1212+ unsigned short fp_to_int_cost;
1213+ unsigned short memory_cost;
1214+};
1215+
1216+/* Information about one CPU we know about. */
1217+struct riscv_cpu_info {
1218+ /* This CPU's canonical name. */
1219+ const char *name;
1220+
1221+ /* The RISC-V ISA and extensions supported by this CPU. */
1222+ const char *isa;
1223+
1224+ /* Tuning parameters for this CPU. */
1225+ const struct riscv_tune_info *tune_info;
1226+};
1227+
1228+/* Global variables for machine-dependent things. */
1229+
1230+/* Which tuning parameters to use. */
1231+static const struct riscv_tune_info *tune_info;
1232+
1233+/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
1234+bool riscv_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
1235+
1236+/* riscv_lo_relocs[X] is the relocation to use when a symbol of type X
1237+ appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
1238+ if they are matched by a special .md file pattern. */
1239+const char *riscv_lo_relocs[NUM_SYMBOL_TYPES];
1240+
1241+/* Likewise for HIGHs. */
1242+const char *riscv_hi_relocs[NUM_SYMBOL_TYPES];
1243+
1244+/* Index R is the smallest register class that contains register R. */
1245+const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
1246+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
1247+ GR_REGS, T_REGS, T_REGS, T_REGS,
1248+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
1249+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
1250+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
1251+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
1252+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
1253+ T_REGS, T_REGS, T_REGS, T_REGS,
1254+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1255+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1256+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1257+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1258+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1259+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1260+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1261+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1262+ FRAME_REGS, FRAME_REGS,
1263+};
1264+
1265+/* Costs to use when optimizing for size. */
1266+static const struct riscv_tune_info rocket_tune_info = {
1267+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
1268+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
1269+ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
1270+ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
1271+ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
1272+ 1, /* issue_rate */
1273+ 3, /* branch_cost */
1274+ COSTS_N_INSNS (2), /* fp_to_int_cost */
1275+ 5 /* memory_cost */
1276+};
1277+
1278+/* Costs to use when optimizing for size. */
1279+static const struct riscv_tune_info optimize_size_tune_info = {
1280+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
1281+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
1282+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
1283+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
1284+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
1285+ 1, /* issue_rate */
1286+ 1, /* branch_cost */
1287+ COSTS_N_INSNS (1), /* fp_to_int_cost */
1288+ 1 /* memory_cost */
1289+};
1290+
1291+/* A table describing all the processors GCC knows about. */
1292+static const struct riscv_cpu_info riscv_cpu_info_table[] = {
1293+ /* Entries for generic ISAs. */
1294+ { "rocket", "IMAFD", &rocket_tune_info },
1295+};
1296+
1297+/* Return the riscv_cpu_info entry for the given name string. */
1298+
1299+static const struct riscv_cpu_info *
1300+riscv_parse_cpu (const char *cpu_string)
1301+{
1302+ unsigned int i;
1303+
1304+ for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
1305+ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
1306+ return riscv_cpu_info_table + i;
1307+
1308+ error ("unknown cpu `%s'", cpu_string);
1309+ return riscv_cpu_info_table;
1310+}
1311+
1312+/* Fill CODES with a sequence of rtl operations to load VALUE.
1313+ Return the number of operations needed. */
1314+
1315+static int
1316+riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
1317+ enum machine_mode mode)
1318+{
1319+ HOST_WIDE_INT low_part = RISCV_CONST_LOW_PART (value);
1320+ int cost = INT_MAX, alt_cost;
1321+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
1322+
1323+ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
1324+ {
1325+ /* Simply ADDI or LUI */
1326+ codes[0].code = UNKNOWN;
1327+ codes[0].value = value;
1328+ return 1;
1329+ }
1330+
1331+ /* End with ADDI */
1332+ if (low_part != 0
1333+ && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
1334+ {
1335+ cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
1336+ codes[cost-1].code = PLUS;
1337+ codes[cost-1].value = low_part;
1338+ }
1339+
1340+ /* End with XORI */
1341+ if (cost > 2 && (low_part < 0 || mode == HImode))
1342+ {
1343+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
1344+ alt_codes[alt_cost-1].code = XOR;
1345+ alt_codes[alt_cost-1].value = low_part;
1346+ if (alt_cost < cost)
1347+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
1348+ }
1349+
1350+ /* Eliminate trailing zeros and end with SLLI */
1351+ if (cost > 2 && (value & 1) == 0)
1352+ {
1353+ int shift = 0;
1354+ while ((value & 1) == 0)
1355+ shift++, value >>= 1;
1356+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
1357+ alt_codes[alt_cost-1].code = ASHIFT;
1358+ alt_codes[alt_cost-1].value = shift;
1359+ if (alt_cost < cost)
1360+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
1361+ }
1362+
1363+ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
1364+ return cost;
1365+}
1366+
1367+static int
1368+riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
1369+ enum machine_mode mode)
1370+{
1371+ int cost = riscv_build_integer_1 (codes, value, mode);
1372+
1373+ /* Eliminate leading zeros and end with SRLI */
1374+ if (value > 0 && cost > 2)
1375+ {
1376+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
1377+ int alt_cost, shift = 0;
1378+ HOST_WIDE_INT shifted_val;
1379+
1380+ /* Try filling trailing bits with 1s */
1381+ while ((value << shift) >= 0)
1382+ shift++;
1383+ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
1384+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
1385+ alt_codes[alt_cost-1].code = LSHIFTRT;
1386+ alt_codes[alt_cost-1].value = shift;
1387+ if (alt_cost < cost)
1388+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
1389+
1390+ /* Try filling trailing bits with 0s */
1391+ shifted_val = value << shift;
1392+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
1393+ alt_codes[alt_cost-1].code = LSHIFTRT;
1394+ alt_codes[alt_cost-1].value = shift;
1395+ if (alt_cost < cost)
1396+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
1397+ }
1398+
1399+ return cost;
1400+}
1401+
1402+static int
1403+riscv_split_integer_cost (HOST_WIDE_INT val)
1404+{
1405+ int cost;
1406+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
1407+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
1408+
1409+ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
1410+ if (loval != hival)
1411+ cost += riscv_build_integer (codes, hival, VOIDmode);
1412+
1413+ return cost;
1414+}
1415+
1416+static int
1417+riscv_integer_cost (HOST_WIDE_INT val)
1418+{
1419+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
1420+ return MIN (riscv_build_integer (codes, val, VOIDmode),
1421+ riscv_split_integer_cost (val));
1422+}
1423+
1424+/* Try to split a 64b integer into 32b parts, then reassemble. */
1425+
1426+static rtx
1427+riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
1428+{
1429+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
1430+ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
1431+
1432+ riscv_move_integer (hi, hi, hival);
1433+ riscv_move_integer (lo, lo, loval);
1434+
1435+ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
1436+ hi = force_reg (mode, hi);
1437+
1438+ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
1439+}
1440+
1441+/* Return true if X is a thread-local symbol. */
1442+
1443+static bool
1444+riscv_tls_symbol_p (const_rtx x)
1445+{
1446+ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1447+}
1448+
1449+static bool
1450+riscv_symbol_binds_local_p (const_rtx x)
1451+{
1452+ return (SYMBOL_REF_DECL (x)
1453+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1454+ : SYMBOL_REF_LOCAL_P (x));
1455+}
1456+
1457+/* Return the method that should be used to access SYMBOL_REF or
1458+ LABEL_REF X in context CONTEXT. */
1459+
1460+static enum riscv_symbol_type
1461+riscv_classify_symbol (const_rtx x)
1462+{
1463+ if (riscv_tls_symbol_p (x))
1464+ return SYMBOL_TLS;
1465+
1466+ if (GET_CODE (x) == LABEL_REF)
1467+ {
1468+ if (LABEL_REF_NONLOCAL_P (x))
1469+ return SYMBOL_GOT_DISP;
1470+ return SYMBOL_ABSOLUTE;
1471+ }
1472+
1473+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
1474+
1475+ if (flag_pic && !riscv_symbol_binds_local_p (x))
1476+ return SYMBOL_GOT_DISP;
1477+
1478+ return SYMBOL_ABSOLUTE;
1479+}
1480+
1481+/* Classify the base of symbolic expression X, given that X appears in
1482+ context CONTEXT. */
1483+
1484+static enum riscv_symbol_type
1485+riscv_classify_symbolic_expression (rtx x)
1486+{
1487+ rtx offset;
1488+
1489+ split_const (x, &x, &offset);
1490+ if (UNSPEC_ADDRESS_P (x))
1491+ return UNSPEC_ADDRESS_TYPE (x);
1492+
1493+ return riscv_classify_symbol (x);
1494+}
1495+
1496+/* Return true if X is a symbolic constant that can be used in context
1497+ CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1498+
1499+bool
1500+riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
1501+{
1502+ rtx offset;
1503+
1504+ split_const (x, &x, &offset);
1505+ if (UNSPEC_ADDRESS_P (x))
1506+ {
1507+ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1508+ x = UNSPEC_ADDRESS (x);
1509+ }
1510+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1511+ *symbol_type = riscv_classify_symbol (x);
1512+ else
1513+ return false;
1514+
1515+ if (offset == const0_rtx)
1516+ return true;
1517+
1518+ /* Check whether a nonzero offset is valid for the underlying
1519+ relocations. */
1520+ switch (*symbol_type)
1521+ {
1522+ case SYMBOL_ABSOLUTE:
1523+ case SYMBOL_TLS_LE:
1524+ return (int32_t) INTVAL (offset) == INTVAL (offset);
1525+
1526+ default:
1527+ return false;
1528+ }
1529+ gcc_unreachable ();
1530+}
1531+
1532+/* Returns the number of instructions necessary to reference a symbol. */
1533+
1534+static int riscv_symbol_insns (enum riscv_symbol_type type)
1535+{
1536+ switch (type)
1537+ {
1538+ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
1539+ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
1540+ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
1541+ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
1542+ default: gcc_unreachable();
1543+ }
1544+}
1545+
1546+/* A for_each_rtx callback. Stop the search if *X references a
1547+ thread-local symbol. */
1548+
1549+static int
1550+riscv_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1551+{
1552+ return riscv_tls_symbol_p (*x);
1553+}
1554+
1555+/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
1556+
1557+static bool
1558+riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1559+{
1560+ return riscv_const_insns (x) > 0;
1561+}
1562+
1563+/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1564+
1565+static bool
1566+riscv_cannot_force_const_mem (enum machine_mode mode, rtx x)
1567+{
1568+ enum riscv_symbol_type type;
1569+ rtx base, offset;
1570+
1571+ /* There is no assembler syntax for expressing an address-sized
1572+ high part. */
1573+ if (GET_CODE (x) == HIGH)
1574+ return true;
1575+
1576+ /* As an optimization, reject constants that riscv_legitimize_move
1577+ can expand inline.
1578+
1579+ Suppose we have a multi-instruction sequence that loads constant C
1580+ into register R. If R does not get allocated a hard register, and
1581+ R is used in an operand that allows both registers and memory
1582+ references, reload will consider forcing C into memory and using
1583+ one of the instruction's memory alternatives. Returning false
1584+ here will force it to use an input reload instead. */
1585+ if (CONST_INT_P (x) && riscv_legitimate_constant_p (mode, x))
1586+ return true;
1587+
1588+ split_const (x, &base, &offset);
1589+ if (riscv_symbolic_constant_p (base, &type))
1590+ {
1591+ /* The same optimization as for CONST_INT. */
1592+ if (SMALL_INT (offset) && riscv_symbol_insns (type) > 0)
1593+ return true;
1594+
1595+ /* It's not worth creating additional dynamic relocations. */
1596+ if (flag_pic)
1597+ return true;
1598+ }
1599+
1600+ /* TLS symbols must be computed by riscv_legitimize_move. */
1601+ if (for_each_rtx (&x, &riscv_tls_symbol_ref_1, NULL))
1602+ return true;
1603+
1604+ return false;
1605+}
1606+
1607+/* Return true if register REGNO is a valid base register for mode MODE.
1608+ STRICT_P is true if REG_OK_STRICT is in effect. */
1609+
1610+int
1611+riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
1612+ bool strict_p)
1613+{
1614+ if (!HARD_REGISTER_NUM_P (regno))
1615+ {
1616+ if (!strict_p)
1617+ return true;
1618+ regno = reg_renumber[regno];
1619+ }
1620+
1621+ /* These fake registers will be eliminated to either the stack or
1622+ hard frame pointer, both of which are usually valid base registers.
1623+ Reload deals with the cases where the eliminated form isn't valid. */
1624+ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1625+ return true;
1626+
1627+ return GP_REG_P (regno);
1628+}
1629+
1630+/* Return true if X is a valid base register for mode MODE.
1631+ STRICT_P is true if REG_OK_STRICT is in effect. */
1632+
1633+static bool
1634+riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
1635+{
1636+ if (!strict_p && GET_CODE (x) == SUBREG)
1637+ x = SUBREG_REG (x);
1638+
1639+ return (REG_P (x)
1640+ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
1641+}
1642+
1643+/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
1644+ can address a value of mode MODE. */
1645+
1646+static bool
1647+riscv_valid_offset_p (rtx x, enum machine_mode mode)
1648+{
1649+ /* Check that X is a signed 12-bit number. */
1650+ if (!const_arith_operand (x, Pmode))
1651+ return false;
1652+
1653+ /* We may need to split multiword moves, so make sure that every word
1654+ is accessible. */
1655+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1656+ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
1657+ return false;
1658+
1659+ return true;
1660+}
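+
+/* Example of the multiword check above: on RV32, a DImode access at
+   offset 2044 is rejected, because its second word would sit at
+   offset 2048, outside the signed 12-bit range [-2048, 2047], even
+   though 2044 itself is a valid offset.  */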
1661+
1662+/* Return true if a LO_SUM can address a value of mode MODE when the
1663+ LO_SUM symbol has type SYMBOL_TYPE. */
1664+
1665+static bool
1666+riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
1667+{
1668+ /* Check that symbols of type SYMBOL_TYPE can be used to access values
1669+ of mode MODE. */
1670+ if (riscv_symbol_insns (symbol_type) == 0)
1671+ return false;
1672+
1673+ /* Check that there is a known low-part relocation. */
1674+ if (riscv_lo_relocs[symbol_type] == NULL)
1675+ return false;
1676+
1677+ /* We may need to split multiword moves, so make sure that each word
1678+ can be accessed without inducing a carry. This is mainly needed
1679+ for o64, which has historically only guaranteed 64-bit alignment
1680+ for 128-bit types. */
1681+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
1682+ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
1683+ return false;
1684+
1685+ return true;
1686+}
1687+
1688+/* Return true if X is a valid address for machine mode MODE. If it is,
1689+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
1690+ effect. */
1691+
1692+static bool
1693+riscv_classify_address (struct riscv_address_info *info, rtx x,
1694+ enum machine_mode mode, bool strict_p)
1695+{
1696+ switch (GET_CODE (x))
1697+ {
1698+ case REG:
1699+ case SUBREG:
1700+ info->type = ADDRESS_REG;
1701+ info->reg = x;
1702+ info->offset = const0_rtx;
1703+ return riscv_valid_base_register_p (info->reg, mode, strict_p);
1704+
1705+ case PLUS:
1706+ info->type = ADDRESS_REG;
1707+ info->reg = XEXP (x, 0);
1708+ info->offset = XEXP (x, 1);
1709+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
1710+ && riscv_valid_offset_p (info->offset, mode));
1711+
1712+ case LO_SUM:
1713+ info->type = ADDRESS_LO_SUM;
1714+ info->reg = XEXP (x, 0);
1715+ info->offset = XEXP (x, 1);
1716+ /* We have to trust the creator of the LO_SUM to do something vaguely
1717+ sane. Target-independent code that creates a LO_SUM should also
1718+ create and verify the matching HIGH. Target-independent code that
1719+ adds an offset to a LO_SUM must prove that the offset will not
1720+ induce a carry. Failure to do either of these things would be
1721+ a bug, and we are not required to check for it here. The RISCV
1722+ backend itself should only create LO_SUMs for valid symbolic
1723+ constants, with the high part being a HIGH. */
1725+ info->symbol_type
1726+ = riscv_classify_symbolic_expression (info->offset);
1727+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
1728+ && riscv_valid_lo_sum_p (info->symbol_type, mode));
1729+
1730+ case CONST_INT:
1731+ /* Small-integer addresses don't occur very often, but they
1732+ are legitimate if x0 is a valid base register. */
1733+ info->type = ADDRESS_CONST_INT;
1734+ return SMALL_INT (x);
1735+
1736+ default:
1737+ return false;
1738+ }
1739+}
1740+
1741+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
1742+
1743+static bool
1744+riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
1745+{
1746+ struct riscv_address_info addr;
1747+
1748+ return riscv_classify_address (&addr, x, mode, strict_p);
1749+}
1750+
1751+/* Return the number of instructions needed to load or store a value
1752+ of mode MODE at address X. Return 0 if X isn't valid for MODE.
1753+ Assume that multiword moves may need to be split into word moves
1754+ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
1755+ enough. */
1756+
1757+int
1758+riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
1759+{
1760+ struct riscv_address_info addr;
1761+ int n = 1;
1762+
1763+ if (!riscv_classify_address (&addr, x, mode, false))
1764+ return 0;
1765+
1766+ /* BLKmode is used for single unaligned loads and stores and should
1767+ not count as a multiword mode. */
1768+ if (mode != BLKmode && might_split_p)
1769+ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1770+
1771+ if (addr.type == ADDRESS_LO_SUM)
1772+ n += riscv_symbol_insns (addr.symbol_type) - 1;
1773+
1774+ return n;
1775+}
1776+
1777+/* Return the number of instructions needed to load constant X.
1778+ Return 0 if X isn't a valid constant. */
1779+
1780+int
1781+riscv_const_insns (rtx x)
1782+{
1783+ enum riscv_symbol_type symbol_type;
1784+ rtx offset;
1785+
1786+ switch (GET_CODE (x))
1787+ {
1788+ case HIGH:
1789+ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1790+ || !riscv_hi_relocs[symbol_type])
1791+ return 0;
1792+
1793+ /* This is simply an LUI. */
1794+ return 1;
1795+
1796+ case CONST_INT:
1797+ {
1798+ int cost = riscv_integer_cost (INTVAL (x));
1799+ /* Force complicated constants to memory. */
1800+ return cost < 4 ? cost : 0;
1801+ }
1802+
1803+ case CONST_DOUBLE:
1804+ case CONST_VECTOR:
1805+ /* Allow zeros, which we can load using x0. */
1806+ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
1807+
1808+ case CONST:
1809+ /* See if we can refer to X directly. */
1810+ if (riscv_symbolic_constant_p (x, &symbol_type))
1811+ return riscv_symbol_insns (symbol_type);
1812+
1813+ /* Otherwise try splitting the constant into a base and offset.
1814+ If the offset is a 12-bit value, we can load the base address
1815+ into a register and then use ADDI to add in the offset.
1816+ If the offset is larger, we can load the base and offset
1817+ into separate registers and add them together with ADD.
1818+ However, the latter is only possible before reload; during
1819+ and after reload, we must have the option of forcing the
1820+ constant into the pool instead. */
1821+ split_const (x, &x, &offset);
1822+ if (offset != 0)
1823+ {
1824+ int n = riscv_const_insns (x);
1825+ if (n != 0)
1826+ {
1827+ if (SMALL_INT (offset))
1828+ return n + 1;
1829+ else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
1830+ return n + 1 + riscv_integer_cost (INTVAL (offset));
1831+ }
1832+ }
1833+ return 0;
1834+
1835+ case SYMBOL_REF:
1836+ case LABEL_REF:
1837+ return riscv_symbol_insns (riscv_classify_symbol (x));
1838+
1839+ default:
1840+ return 0;
1841+ }
1842+}
1843+
1844+/* X is a doubleword constant that can be handled by splitting it into
1845+ two words and loading each word separately. Return the number of
1846+ instructions required to do this. */
1847+
1848+int
1849+riscv_split_const_insns (rtx x)
1850+{
1851+ unsigned int low, high;
1852+
1853+ low = riscv_const_insns (riscv_subword (x, false));
1854+ high = riscv_const_insns (riscv_subword (x, true));
1855+ gcc_assert (low > 0 && high > 0);
1856+ return low + high;
1857+}
1858+
1859+/* Return the number of instructions needed to implement INSN,
1860+ given that it loads from or stores to MEM. */
1861+
1862+int
1863+riscv_load_store_insns (rtx mem, rtx insn)
1864+{
1865+ enum machine_mode mode;
1866+ bool might_split_p;
1867+ rtx set;
1868+
1869+ gcc_assert (MEM_P (mem));
1870+ mode = GET_MODE (mem);
1871+
1872+ /* Try to prove that INSN does not need to be split. */
1873+ might_split_p = true;
1874+ if (GET_MODE_BITSIZE (mode) == 64)
1875+ {
1876+ set = single_set (insn);
1877+ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
1878+ might_split_p = false;
1879+ }
1880+
1881+ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
1882+}
1883+
1884+/* Emit a move from SRC to DEST. Assume that the move expanders can
1885+ handle all moves if !can_create_pseudo_p (). The distinction is
1886+ important because, unlike emit_move_insn, the move expanders know
1887+ how to force Pmode objects into the constant pool even when the
1888+ constant pool address is not itself legitimate. */
1889+
1890+rtx
1891+riscv_emit_move (rtx dest, rtx src)
1892+{
1893+ return (can_create_pseudo_p ()
1894+ ? emit_move_insn (dest, src)
1895+ : emit_move_insn_1 (dest, src));
1896+}
1897+
1898+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
1899+
1900+static void
1901+riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
1902+{
1903+ emit_insn (gen_rtx_SET (VOIDmode, target,
1904+ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
1905+}
1906+
1907+/* Compute (CODE OP0 OP1) and store the result in a new register
1908+ of mode MODE. Return that new register. */
1909+
1910+static rtx
1911+riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
1912+{
1913+ rtx reg;
1914+
1915+ reg = gen_reg_rtx (mode);
1916+ riscv_emit_binary (code, reg, op0, op1);
1917+ return reg;
1918+}
1919+
1920+/* Copy VALUE to a register and return that register. If new pseudos
1921+ are allowed, copy it into a new register, otherwise use DEST. */
1922+
1923+static rtx
1924+riscv_force_temporary (rtx dest, rtx value)
1925+{
1926+ if (can_create_pseudo_p ())
1927+ return force_reg (Pmode, value);
1928+ else
1929+ {
1930+ riscv_emit_move (dest, value);
1931+ return dest;
1932+ }
1933+}
1934+
1935+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
1936+ then add CONST_INT OFFSET to the result. */
1937+
1938+static rtx
1939+riscv_unspec_address_offset (rtx base, rtx offset,
1940+ enum riscv_symbol_type symbol_type)
1941+{
1942+ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1943+ UNSPEC_ADDRESS_FIRST + symbol_type);
1944+ if (offset != const0_rtx)
1945+ base = gen_rtx_PLUS (Pmode, base, offset);
1946+ return gen_rtx_CONST (Pmode, base);
1947+}
1948+
1949+/* Return an UNSPEC address with underlying address ADDRESS and symbol
1950+ type SYMBOL_TYPE. */
1951+
1952+rtx
1953+riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
1954+{
1955+ rtx base, offset;
1956+
1957+ split_const (address, &base, &offset);
1958+ return riscv_unspec_address_offset (base, offset, symbol_type);
1959+}
1960+
1961+/* If OP is an UNSPEC address, return the address to which it refers,
1962+ otherwise return OP itself. */
1963+
1964+static rtx
1965+riscv_strip_unspec_address (rtx op)
1966+{
1967+ rtx base, offset;
1968+
1969+ split_const (op, &base, &offset);
1970+ if (UNSPEC_ADDRESS_P (base))
1971+ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
1972+ return op;
1973+}
1974+
1975+/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1976+ high part to BASE and return the result. Just return BASE otherwise.
1977+ TEMP is as for riscv_force_temporary.
1978+
1979+ The returned expression can be used as the first operand to a LO_SUM. */
1980+
1981+static rtx
1982+riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
1983+{
1984+ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
1985+ return riscv_force_temporary (temp, addr);
1986+}
1987+
1988+/* Load an entry from the GOT. */
1989+static rtx riscv_got_load_tls_gd(rtx dest, rtx sym)
1990+{
1991+ return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym));
1992+}
1993+
1994+static rtx riscv_got_load_tls_ie(rtx dest, rtx sym)
1995+{
1996+ return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym));
1997+}
1998+
1999+static rtx riscv_tls_add_tp_le(rtx dest, rtx base, rtx sym)
2000+{
2001+ rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
2002+ return (Pmode == DImode ? gen_tls_add_tp_ledi(dest, base, tp, sym) : gen_tls_add_tp_lesi(dest, base, tp, sym));
2003+}
2004+
2005+/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2006+ it appears in a MEM of that mode. Return true if ADDR is a legitimate
2007+ constant in that context and can be split into high and low parts.
2008+ If so, and if LOW_OUT is nonnull, emit the high part and store the
2009+ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
2010+
2011+ TEMP is as for riscv_force_temporary and is used to load the high
2012+ part into a register.
2013+
2014+ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
2015+ a legitimate SET_SRC for an .md pattern, otherwise the low part
2016+ is guaranteed to be a legitimate address for mode MODE. */
2017+
2018+bool
2019+riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
2020+{
2021+ enum riscv_symbol_type symbol_type;
2022+ rtx high;
2023+
2024+ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
2025+ || !riscv_symbolic_constant_p (addr, &symbol_type)
2026+ || riscv_symbol_insns (symbol_type) == 0
2027+ || !riscv_hi_relocs[symbol_type])
2028+ return false;
2029+
2030+ if (low_out)
2031+ {
2032+ switch (symbol_type)
2033+ {
2034+ case SYMBOL_ABSOLUTE:
2035+ high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2036+ high = riscv_force_temporary (temp, high);
2037+ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2038+ break;
2039+
2040+ default:
2041+ gcc_unreachable ();
2042+ }
2043+ }
2044+
2045+ return true;
2046+}
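+
+/* Sketch of the SYMBOL_ABSOLUTE case above (a5 is illustrative; in
+   practice TEMP or a fresh pseudo is used): the HIGH part is emitted
+   as "lui a5,%hi(sym)" and *LOW_OUT becomes (lo_sum a5 sym), which is
+   later printed as "%lo(sym)(a5)" in the memory reference.  */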
2047+
2048+/* Return a legitimate address for REG + OFFSET. TEMP is as for
2049+ riscv_force_temporary; it is only needed when OFFSET is not a
2050+ SMALL_OPERAND. */
2051+
2052+static rtx
2053+riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2054+{
2055+ if (!SMALL_OPERAND (offset))
2056+ {
2057+ rtx high;
2058+
2059+ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
2060+ The addition inside the macro CONST_HIGH_PART may cause an
2061+ overflow, so we need to force a sign-extension check. */
2062+ high = gen_int_mode (RISCV_CONST_HIGH_PART (offset), Pmode);
2063+ offset = RISCV_CONST_LOW_PART (offset);
2064+ high = riscv_force_temporary (temp, high);
2065+ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2066+ }
2067+ return plus_constant (Pmode, reg, offset);
2068+}
2069+
2070+/* The __tls_get_addr symbol. */
2071+static GTY(()) rtx riscv_tls_symbol;
2072+
2073+/* Return an instruction sequence that calls __tls_get_addr. SYM is
2074+   the TLS symbol we are referencing and RESULT is an RTX for the
2075+   return value location. */
2077+
2078+static rtx
2079+riscv_call_tls_get_addr (rtx sym, rtx result)
2080+{
2081+ rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2082+
2083+ if (!riscv_tls_symbol)
2084+ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
2085+
2086+ start_sequence ();
2087+
2088+ emit_insn (riscv_got_load_tls_gd (a0, sym));
2089+ insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
2090+ RTL_CONST_CALL_P (insn) = 1;
2091+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2092+ insn = get_insns ();
2093+
2094+ end_sequence ();
2095+
2096+ return insn;
2097+}
2098+
2099+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2100+ its address. The return value will be both a valid address and a valid
2101+ SET_SRC (either a REG or a LO_SUM). */
2102+
2103+static rtx
2104+riscv_legitimize_tls_address (rtx loc)
2105+{
2106+ rtx dest, insn, tp, tmp1;
2107+ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
2108+
2109+ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
2110+ if (!flag_pic)
2111+ model = TLS_MODEL_LOCAL_EXEC;
2112+
2113+ switch (model)
2114+ {
2115+ case TLS_MODEL_LOCAL_DYNAMIC:
2116+ /* Rely on section anchors for the optimization that LDM TLS
2117+ provides. The anchor's address is loaded with GD TLS. */
2118+ case TLS_MODEL_GLOBAL_DYNAMIC:
2119+ tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
2120+ insn = riscv_call_tls_get_addr (loc, tmp1);
2121+ dest = gen_reg_rtx (Pmode);
2122+ emit_libcall_block (insn, dest, tmp1, loc);
2123+ break;
2124+
2125+ case TLS_MODEL_INITIAL_EXEC:
2126+ /* la.tls.ie; tp-relative add */
2127+ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
2128+ tmp1 = gen_reg_rtx (Pmode);
2129+ emit_insn (riscv_got_load_tls_ie (tmp1, loc));
2130+ dest = gen_reg_rtx (Pmode);
2131+ emit_insn (gen_add3_insn (dest, tmp1, tp));
2132+ break;
2133+
2134+ case TLS_MODEL_LOCAL_EXEC:
2135+ tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
2136+ dest = gen_reg_rtx (Pmode);
2137+ emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
2138+ dest = gen_rtx_LO_SUM (Pmode, dest,
2139+ riscv_unspec_address (loc, SYMBOL_TLS_LE));
2140+ break;
2141+
2142+ default:
2143+ gcc_unreachable ();
2144+ }
2145+ return dest;
2146+}
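+
+/* Illustrative local-exec output for the code above, assuming the
+   standard %tprel_* assembler operators:
+       lui  a5,%tprel_hi(x)
+       add  a5,a5,tp,%tprel_add(x)
+       lw   a0,%tprel_lo(x)(a5)
+   i.e. a HIGH, the tp-relative add emitted by riscv_tls_add_tp_le,
+   and a LO_SUM reference.  */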
2147+
2148+/* If X is not a valid address for mode MODE, force it into a register. */
2149+
2150+static rtx
2151+riscv_force_address (rtx x, enum machine_mode mode)
2152+{
2153+ if (!riscv_legitimate_address_p (mode, x, false))
2154+ x = force_reg (Pmode, x);
2155+ return x;
2156+}
2157+
2158+/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
2159+ be legitimized in a way that the generic machinery might not expect,
2160+ return a new address, otherwise return NULL. MODE is the mode of
2161+ the memory being accessed. */
2162+
2163+static rtx
2164+riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
2165+ enum machine_mode mode)
2166+{
2167+ rtx addr;
2168+
2169+ if (riscv_tls_symbol_p (x))
2170+ return riscv_legitimize_tls_address (x);
2171+
2172+ /* See if the address can split into a high part and a LO_SUM. */
2173+ if (riscv_split_symbol (NULL, x, mode, &addr))
2174+ return riscv_force_address (addr, mode);
2175+
2176+ /* Handle BASE + OFFSET using riscv_add_offset. */
2177+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
2178+ && INTVAL (XEXP (x, 1)) != 0)
2179+ {
2180+ rtx base = XEXP (x, 0);
2181+ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
2182+
2183+ if (!riscv_valid_base_register_p (base, mode, false))
2184+ base = copy_to_mode_reg (Pmode, base);
2185+ addr = riscv_add_offset (NULL, base, offset);
2186+ return riscv_force_address (addr, mode);
2187+ }
2188+
2189+ return x;
2190+}
2191+
2192+/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
2193+
2194+void
2195+riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
2196+{
2197+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
2198+ enum machine_mode mode;
2199+ int i, num_ops;
2200+ rtx x;
2201+
2202+ mode = GET_MODE (dest);
2203+ num_ops = riscv_build_integer (codes, value, mode);
2204+
2205+ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
2206+ && num_ops >= riscv_split_integer_cost (value))
2207+ x = riscv_split_integer (value, mode);
2208+ else
2209+ {
2210+ /* Apply each binary operation to X. */
2211+ x = GEN_INT (codes[0].value);
2212+
2213+ for (i = 1; i < num_ops; i++)
2214+ {
2215+ if (!can_create_pseudo_p ())
2216+ {
2217+ emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2218+ x = temp;
2219+ }
2220+ else
2221+ x = force_reg (mode, x);
2222+
2223+ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2224+ }
2225+ }
2226+
2227+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2228+}
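+
+/* Worked example for the loop above: riscv_build_integer splits
+   0x12345678 into a LUI of 0x12345000 followed by a PLUS of 0x678,
+   so the emitted sequence is
+       lui  a0,0x12345
+       addi a0,a0,0x678
+   Larger constants may instead be handled by riscv_split_integer
+   when new pseudos are available.  */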
2229+
2230+/* Subroutine of riscv_legitimize_move. Move constant SRC into register
2231+ DEST given that SRC satisfies immediate_operand but doesn't satisfy
2232+ move_operand. */
2233+
2234+static void
2235+riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2236+{
2237+ rtx base, offset;
2238+
2239+ /* Split moves of big integers into smaller pieces. */
2240+ if (splittable_const_int_operand (src, mode))
2241+ {
2242+ riscv_move_integer (dest, dest, INTVAL (src));
2243+ return;
2244+ }
2245+
2246+ /* Split moves of symbolic constants into high/low pairs. */
2247+ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2248+ {
2249+ emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2250+ return;
2251+ }
2252+
2253+ /* Generate the appropriate access sequences for TLS symbols. */
2254+ if (riscv_tls_symbol_p (src))
2255+ {
2256+ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
2257+ return;
2258+ }
2259+
2260+ /* If we have (const (plus symbol offset)), and that expression cannot
2261+ be forced into memory, load the symbol first and add in the offset. Also
2262+ prefer to do this even if the constant _can_ be forced into memory, as it
2263+ usually produces better code. */
2264+ split_const (src, &base, &offset);
2265+ if (offset != const0_rtx
2266+ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
2267+ {
2268+ base = riscv_force_temporary (dest, base);
2269+ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
2270+ return;
2271+ }
2272+
2273+ src = force_const_mem (mode, src);
2274+
2275+ /* When using explicit relocs, constant pool references are sometimes
2276+ not legitimate addresses. */
2277+ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2278+ riscv_emit_move (dest, src);
2279+}
2280+
2281+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
2282+ sequence that is valid. */
2283+
2284+bool
2285+riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2286+{
2287+ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2288+ {
2289+ riscv_emit_move (dest, force_reg (mode, src));
2290+ return true;
2291+ }
2292+
2293+ /* We need to deal with constants that would be legitimate
2294+ immediate_operands but aren't legitimate move_operands. */
2295+ if (CONSTANT_P (src) && !move_operand (src, mode))
2296+ {
2297+ riscv_legitimize_const_move (mode, dest, src);
2298+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2299+ return true;
2300+ }
2301+ return false;
2302+}
2303+
2304+/* Return true if there is an instruction that implements CODE and accepts
2305+ X as an immediate operand. */
2306+
2307+static int
2308+riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
2309+{
2310+ switch (code)
2311+ {
2312+ case ASHIFT:
2313+ case ASHIFTRT:
2314+ case LSHIFTRT:
2315+ /* All shift counts are truncated to a valid constant. */
2316+ return true;
2317+
2318+ case AND:
2319+ case IOR:
2320+ case XOR:
2321+ case PLUS:
2322+ case LT:
2323+ case LTU:
2324+ /* These instructions take 12-bit signed immediates. */
2325+ return SMALL_OPERAND (x);
2326+
2327+ case LE:
2328+ /* We add 1 to the immediate and use SLT. */
2329+ return SMALL_OPERAND (x + 1);
2330+
2331+ case LEU:
2332+ /* Likewise SLTU, but reject the always-true case. */
2333+ return SMALL_OPERAND (x + 1) && x + 1 != 0;
2334+
2335+ case GE:
2336+ case GEU:
2337+ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
2338+ return x == 1;
2339+
2340+ default:
2341+ /* By default assume that x0 can be used for 0. */
2342+ return x == 0;
2343+ }
2344+}
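+
+/* Example of the LE case above: "a <= 7" has no direct instruction,
+   but SMALL_OPERAND (7 + 1) holds, so it can be emitted as
+       slti t0,a0,8
+   LEU works the same way with SLTIU, except that x + 1 == 0 (an
+   always-true comparison) is rejected.  */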
2345+
2346+/* Return the cost of binary operation X, given that the instruction
2347+ sequence for a word-sized or smaller operation takes SINGLE_INSNS
2348+ instructions and that the sequence for a double-word operation takes
2349+ DOUBLE_INSNS instructions. */
2350+
2351+static int
2352+riscv_binary_cost (rtx x, int single_insns, int double_insns)
2353+{
2354+ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
2355+ return COSTS_N_INSNS (double_insns);
2356+ return COSTS_N_INSNS (single_insns);
2357+}
2358+
2359+/* Return the cost of sign-extending OP to mode MODE, not including the
2360+ cost of OP itself. */
2361+
2362+static int
2363+riscv_sign_extend_cost (enum machine_mode mode, rtx op)
2364+{
2365+ if (MEM_P (op))
2366+ /* Extended loads are as cheap as unextended ones. */
2367+ return 0;
2368+
2369+ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2370+ /* A sign extension from SImode to DImode in 64-bit mode is free. */
2371+ return 0;
2372+
2373+ /* We need to use a shift left and a shift right. */
2374+ return COSTS_N_INSNS (2);
2375+}
2376+
2377+/* Return the cost of zero-extending OP to mode MODE, not including the
2378+ cost of OP itself. */
2379+
2380+static int
2381+riscv_zero_extend_cost (enum machine_mode mode, rtx op)
2382+{
2383+ if (MEM_P (op))
2384+ /* Extended loads are as cheap as unextended ones. */
2385+ return 0;
2386+
2387+ if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
2388+ ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
2389+ /* We need a shift left and a shift right. */
2390+ return COSTS_N_INSNS (2);
2391+
2392+ /* We can use ANDI. */
2393+ return COSTS_N_INSNS (1);
2394+}
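+
+/* Cost sketch for register operands: zero-extending SImode to DImode
+   on RV64 needs two shifts,
+       slli a0,a0,32
+       srli a0,a0,32
+   whereas zero-extending QImode is a single "andi a0,a0,0xff".  */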
2395+
2396+/* Implement TARGET_RTX_COSTS. */
2397+
2398+static bool
2399+riscv_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2400+ int *total, bool speed)
2401+{
2402+ enum machine_mode mode = GET_MODE (x);
2403+ bool float_mode_p = FLOAT_MODE_P (mode);
2404+ int cost;
2405+
2406+ switch (code)
2407+ {
2408+ case CONST_INT:
2409+ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
2410+ {
2411+ *total = 0;
2412+ return true;
2413+ }
2414+ /* Fall through. */
2415+
2416+ case SYMBOL_REF:
2417+ case LABEL_REF:
2418+ case CONST_DOUBLE:
2419+ case CONST:
2420+ if (speed)
2421+ *total = 1;
2422+ else if ((cost = riscv_const_insns (x)) > 0)
2423+ *total = COSTS_N_INSNS (cost);
2424+ else /* The value will be loaded from the constant pool. */
2425+ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
2426+ return true;
2427+
2428+ case MEM:
2429+ /* If the address is legitimate, return the number of
2430+ instructions it needs. */
2431+ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
2432+ {
2433+ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
2434+ return true;
2435+ }
2436+ /* Otherwise use the default handling. */
2437+ return false;
2438+
2439+ case NOT:
2440+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
2441+ return false;
2442+
2443+ case AND:
2444+ case IOR:
2445+ case XOR:
2446+ /* Double-word operations use two single-word operations. */
2447+ *total = riscv_binary_cost (x, 1, 2);
2448+ return false;
2449+
2450+ case ASHIFT:
2451+ case ASHIFTRT:
2452+ case LSHIFTRT:
2453+ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
2454+ return false;
2455+
2456+ case ABS:
2457+ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
2458+ return false;
2459+
2460+ case LO_SUM:
2461+ *total = set_src_cost (XEXP (x, 0), speed);
2462+ return true;
2463+
2464+ case LT:
2465+ case LTU:
2466+ case LE:
2467+ case LEU:
2468+ case GT:
2469+ case GTU:
2470+ case GE:
2471+ case GEU:
2472+ case EQ:
2473+ case NE:
2474+ case UNORDERED:
2475+ case LTGT:
2476+ /* Branch comparisons have VOIDmode, so use the first operand's
2477+ mode instead. */
2478+ mode = GET_MODE (XEXP (x, 0));
2479+ if (float_mode_p)
2480+ *total = tune_info->fp_add[mode == DFmode];
2481+ else
2482+ *total = riscv_binary_cost (x, 1, 3);
2483+ return false;
2484+
2485+ case MINUS:
2486+ if (float_mode_p
2487+ && !HONOR_NANS (mode)
2488+ && !HONOR_SIGNED_ZEROS (mode))
2489+ {
2490+ /* See if we can use NMADD or NMSUB. See riscv.md for the
2491+ associated patterns. */
2492+ rtx op0 = XEXP (x, 0);
2493+ rtx op1 = XEXP (x, 1);
2494+ if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
2495+ {
2496+ *total = (tune_info->fp_mul[mode == DFmode]
2497+ + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
2498+ + set_src_cost (XEXP (op0, 1), speed)
2499+ + set_src_cost (op1, speed));
2500+ return true;
2501+ }
2502+ if (GET_CODE (op1) == MULT)
2503+ {
2504+ *total = (tune_info->fp_mul[mode == DFmode]
2505+ + set_src_cost (op0, speed)
2506+ + set_src_cost (XEXP (op1, 0), speed)
2507+ + set_src_cost (XEXP (op1, 1), speed));
2508+ return true;
2509+ }
2510+ }
2511+ /* Fall through. */
2512+
2513+ case PLUS:
2514+ if (float_mode_p)
2515+ *total = tune_info->fp_add[mode == DFmode];
2516+ else
2517+ *total = riscv_binary_cost (x, 1, 4);
2518+ return false;
2519+
2520+ case NEG:
2521+ if (float_mode_p
2522+ && !HONOR_NANS (mode)
2523+ && HONOR_SIGNED_ZEROS (mode))
2524+ {
2525+ /* See if we can use NMADD or NMSUB. See riscv.md for the
2526+ associated patterns. */
2527+ rtx op = XEXP (x, 0);
2528+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
2529+ && GET_CODE (XEXP (op, 0)) == MULT)
2530+ {
2531+ *total = (tune_info->fp_mul[mode == DFmode]
2532+ + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
2533+ + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
2534+ + set_src_cost (XEXP (op, 1), speed));
2535+ return true;
2536+ }
2537+ }
2538+
2539+ if (float_mode_p)
2540+ *total = tune_info->fp_add[mode == DFmode];
2541+ else
2542+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
2543+ return false;
2544+
2545+ case MULT:
2546+ if (float_mode_p)
2547+ *total = tune_info->fp_mul[mode == DFmode];
2548+ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
2549+ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
2550+ else if (!speed)
2551+ *total = COSTS_N_INSNS (1);
2552+ else
2553+ *total = tune_info->int_mul[mode == DImode];
2554+ return false;
2555+
2556+ case DIV:
2557+ case SQRT:
2558+ case MOD:
2559+ if (float_mode_p)
2560+ {
2561+ *total = tune_info->fp_div[mode == DFmode];
2562+ return false;
2563+ }
2564+ /* Fall through. */
2565+
2566+ case UDIV:
2567+ case UMOD:
2568+ if (speed)
2569+ *total = tune_info->int_div[mode == DImode];
2570+ else
2571+ *total = COSTS_N_INSNS (1);
2572+ return false;
2573+
2574+ case SIGN_EXTEND:
2575+ *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
2576+ return false;
2577+
2578+ case ZERO_EXTEND:
2579+ *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
2580+ return false;
2581+
2582+ case FLOAT:
2583+ case UNSIGNED_FLOAT:
2584+ case FIX:
2585+ case FLOAT_EXTEND:
2586+ case FLOAT_TRUNCATE:
2587+ *total = tune_info->fp_add[mode == DFmode];
2588+ return false;
2589+
2590+ default:
2591+ return false;
2592+ }
2593+}
2594+
2595+/* Implement TARGET_ADDRESS_COST. */
2596+
2597+static int
2598+riscv_address_cost (rtx addr, enum machine_mode mode,
2599+ addr_space_t as ATTRIBUTE_UNUSED,
2600+ bool speed ATTRIBUTE_UNUSED)
2601+{
2602+ return riscv_address_insns (addr, mode, false);
2603+}
2604+
2605+/* Return one word of double-word value OP. HIGH_P is true to select the
2606+ high part or false to select the low part. */
2607+
2608+rtx
2609+riscv_subword (rtx op, bool high_p)
2610+{
2611+ unsigned int byte;
2612+ enum machine_mode mode;
2613+
2614+ mode = GET_MODE (op);
2615+ if (mode == VOIDmode)
2616+ mode = TARGET_64BIT ? TImode : DImode;
2617+
2618+ byte = high_p ? UNITS_PER_WORD : 0;
2619+
2620+ if (FP_REG_RTX_P (op))
2621+ return gen_rtx_REG (word_mode, REGNO (op) + high_p);
2622+
2623+ if (MEM_P (op))
2624+ return adjust_address (op, word_mode, byte);
2625+
2626+ return simplify_gen_subreg (word_mode, op, mode, byte);
2627+}
2628+
2629+/* Return true if a 64-bit move from SRC to DEST should be split into two. */
2630+
2631+bool
2632+riscv_split_64bit_move_p (rtx dest, rtx src)
2633+{
2634+ /* All 64b moves are legal in 64b mode. All 64b FPR <-> FPR and
2635+ FPR <-> MEM moves are legal in 32b mode, too. Although
2636+ FPR <-> GPR moves are not available in general in 32b mode,
2637+ we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
2638+ return !(TARGET_64BIT
2639+ || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2640+ || (FP_REG_RTX_P (dest) && MEM_P (src))
2641+ || (FP_REG_RTX_P (src) && MEM_P (dest))
2642+ || (FP_REG_RTX_P(dest) && src == CONST0_RTX(GET_MODE(src))));
2643+}
2644+
2645+/* Split a doubleword move from SRC to DEST. On 32-bit targets,
2646+ this function handles 64-bit moves for which riscv_split_64bit_move_p
2647+ holds. For 64-bit targets, this function handles 128-bit moves. */
2648+
2649+void
2650+riscv_split_doubleword_move (rtx dest, rtx src)
2651+{
2652+ rtx low_dest;
2653+
2654+ /* The operation can be split into two normal moves. Decide in
2655+ which order to do them. */
2656+ low_dest = riscv_subword (dest, false);
2657+ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
2658+ {
2659+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
2660+ riscv_emit_move (low_dest, riscv_subword (src, false));
2661+ }
2662+ else
2663+ {
2664+ riscv_emit_move (low_dest, riscv_subword (src, false));
2665+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
2666+ }
2667+}
2668+
2669+/* Return the appropriate instructions to move SRC into DEST. Assume
2670+ that SRC is operand 1 and DEST is operand 0. */
2671+
2672+const char *
2673+riscv_output_move (rtx dest, rtx src)
2674+{
2675+ enum rtx_code dest_code, src_code;
2676+ enum machine_mode mode;
2677+ bool dbl_p;
2678+
2679+ dest_code = GET_CODE (dest);
2680+ src_code = GET_CODE (src);
2681+ mode = GET_MODE (dest);
2682+ dbl_p = (GET_MODE_SIZE (mode) == 8);
2683+
2684+ if (dbl_p && riscv_split_64bit_move_p (dest, src))
2685+ return "#";
2686+
2687+ if ((src_code == REG && GP_REG_P (REGNO (src)))
2688+ || (src == CONST0_RTX (mode)))
2689+ {
2690+ if (dest_code == REG)
2691+ {
2692+ if (GP_REG_P (REGNO (dest)))
2693+ return "move\t%0,%z1";
2694+
2695+ if (FP_REG_P (REGNO (dest)))
2696+ {
2697+ if (!dbl_p)
2698+ return "fmv.s.x\t%0,%z1";
2699+ if (TARGET_64BIT)
2700+ return "fmv.d.x\t%0,%z1";
2701+ /* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w. */
2702+ gcc_assert (src == CONST0_RTX (mode));
2703+ return "fcvt.d.w\t%0,x0";
2704+ }
2705+ }
2706+ if (dest_code == MEM)
2707+ switch (GET_MODE_SIZE (mode))
2708+ {
2709+ case 1: return "sb\t%z1,%0";
2710+ case 2: return "sh\t%z1,%0";
2711+ case 4: return "sw\t%z1,%0";
2712+ case 8: return "sd\t%z1,%0";
2713+ }
2714+ }
2715+ if (dest_code == REG && GP_REG_P (REGNO (dest)))
2716+ {
2717+ if (src_code == REG)
2718+ {
2719+ if (FP_REG_P (REGNO (src)))
2720+ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
2721+ }
2722+
2723+ if (src_code == MEM)
2724+ switch (GET_MODE_SIZE (mode))
2725+ {
2726+ case 1: return "lbu\t%0,%1";
2727+ case 2: return "lhu\t%0,%1";
2728+ case 4: return "lw\t%0,%1";
2729+ case 8: return "ld\t%0,%1";
2730+ }
2731+
2732+ if (src_code == CONST_INT)
2733+ return "li\t%0,%1";
2734+
2735+ if (src_code == HIGH)
2736+ return "lui\t%0,%h1";
2737+
2738+ if (symbolic_operand (src, VOIDmode))
2739+ switch (riscv_classify_symbolic_expression (src))
2740+ {
2741+ case SYMBOL_GOT_DISP: return "la\t%0,%1";
2742+ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
2743+ default: gcc_unreachable();
2744+ }
2745+ }
2746+ if (src_code == REG && FP_REG_P (REGNO (src)))
2747+ {
2748+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
2749+ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
2750+
2751+ if (dest_code == MEM)
2752+ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
2753+ }
2754+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
2755+ {
2756+ if (src_code == MEM)
2757+ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
2758+ }
2759+ gcc_unreachable ();
2760+}
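+
+/* Example of the templates above (operand values are hypothetical):
+   an SImode load from the stack matches "lw\t%0,%1" and might print
+   as "lw a0,12(sp)", while storing constant zero uses "sw\t%z1,%0",
+   where the %z modifier prints the zero register as the source.  */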
2761+
2762+/* Return true if CMP1 is a suitable second operand for integer ordering
2763+ test CODE. See also the *sCC patterns in riscv.md. */
2764+
2765+static bool
2766+riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
2767+{
2768+ switch (code)
2769+ {
2770+ case GT:
2771+ case GTU:
2772+ return reg_or_0_operand (cmp1, VOIDmode);
2773+
2774+ case GE:
2775+ case GEU:
2776+ return cmp1 == const1_rtx;
2777+
2778+ case LT:
2779+ case LTU:
2780+ return arith_operand (cmp1, VOIDmode);
2781+
2782+ case LE:
2783+ return sle_operand (cmp1, VOIDmode);
2784+
2785+ case LEU:
2786+ return sleu_operand (cmp1, VOIDmode);
2787+
2788+ default:
2789+ gcc_unreachable ();
2790+ }
2791+}
2792+
2793+/* Return true if *CMP1 (of mode MODE) is a valid second operand for
2794+ integer ordering test *CODE, or if an equivalent combination can
2795+ be formed by adjusting *CODE and *CMP1. When returning true, update
2796+ *CODE and *CMP1 with the chosen code and operand, otherwise leave
2797+ them alone. */
2798+
2799+static bool
2800+riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
2801+ enum machine_mode mode)
2802+{
2803+ HOST_WIDE_INT plus_one;
2804+
2805+ if (riscv_int_order_operand_ok_p (*code, *cmp1))
2806+ return true;
2807+
2808+ if (CONST_INT_P (*cmp1))
2809+ switch (*code)
2810+ {
2811+ case LE:
2812+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
2813+ if (INTVAL (*cmp1) < plus_one)
2814+ {
2815+ *code = LT;
2816+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
2817+ return true;
2818+ }
2819+ break;
2820+
2821+ case LEU:
2822+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
2823+ if (plus_one != 0)
2824+ {
2825+ *code = LTU;
2826+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
2827+ return true;
2828+ }
2829+ break;
2830+
2831+ default:
2832+ break;
2833+ }
2834+ return false;
2835+}
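+
+/* Example of the LE rewrite above, assuming sle_operand only accepts
+   constants whose successor fits in 12 bits: "x <= 2047" cannot use
+   SLTI because 2047 + 1 = 2048 is out of immediate range, so the test
+   becomes "x < 2048" with 2048 forced into a register, where SLT can
+   handle it.  */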
2836+
2837+/* Compare CMP0 and CMP1 using ordering test CODE and store the result
2838+ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
2839+ is nonnull, it's OK to set TARGET to the inverse of the result and
2840+ flip *INVERT_PTR instead. */
2841+
2842+static void
2843+riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
2844+ rtx target, rtx cmp0, rtx cmp1)
2845+{
2846+ enum machine_mode mode;
2847+
2848+ /* First see if there is a RISCV instruction that can do this operation.
2849+ If not, try doing the same for the inverse operation. If that also
2850+ fails, force CMP1 into a register and try again. */
2851+ mode = GET_MODE (cmp0);
2852+ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
2853+ riscv_emit_binary (code, target, cmp0, cmp1);
2854+ else
2855+ {
2856+ enum rtx_code inv_code = reverse_condition (code);
2857+ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
2858+ {
2859+ cmp1 = force_reg (mode, cmp1);
2860+ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
2861+ }
2862+ else if (invert_ptr == 0)
2863+ {
2864+ rtx inv_target;
2865+
2866+ inv_target = riscv_force_binary (GET_MODE (target),
2867+ inv_code, cmp0, cmp1);
2868+ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
2869+ }
2870+ else
2871+ {
2872+ *invert_ptr = !*invert_ptr;
2873+ riscv_emit_binary (inv_code, target, cmp0, cmp1);
2874+ }
2875+ }
2876+}
2877+
2878+/* Return a register that is zero iff CMP0 and CMP1 are equal.
2879+ The register will have the same mode as CMP0. */
2880+
2881+static rtx
2882+riscv_zero_if_equal (rtx cmp0, rtx cmp1)
2883+{
2884+ if (cmp1 == const0_rtx)
2885+ return cmp0;
2886+
2887+ return expand_binop (GET_MODE (cmp0), sub_optab,
2888+ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2889+}
2890+
2891+/* Return false if we can easily emit code for the FP comparison specified
2892+ by *CODE. If not, set *CODE to its inverse and return true. */
2893+
2894+static bool
2895+riscv_reversed_fp_cond (enum rtx_code *code)
2896+{
2897+ switch (*code)
2898+ {
2899+ case EQ:
2900+ case LT:
2901+ case LE:
2902+ case GT:
2903+ case GE:
2904+ case LTGT:
2905+ case ORDERED:
2906+ /* We know how to emit code for these cases... */
2907+ return false;
2908+
2909+ default:
2910+ /* ...but we must invert these and rely on the others. */
2911+ *code = reverse_condition_maybe_unordered (*code);
2912+ return true;
2913+ }
2914+}
2915+
2916+/* Convert a comparison into something that can be used in a branch or
2917+ conditional move. On entry, *OP0 and *OP1 are the values being
2918+ compared and *CODE is the code used to compare them.
2919+
2920+ Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
2921+
2922+static void
2923+riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
2924+{
2925+ rtx cmp_op0 = *op0;
2926+ rtx cmp_op1 = *op1;
2927+
2928+ if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
2929+ {
2930+ if (splittable_const_int_operand (cmp_op1, VOIDmode))
2931+ {
2932+ HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
2933+ enum rtx_code new_code;
2934+
2935+ switch (*code)
2936+ {
2937+ case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
2938+ case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
2939+ case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
2940+ case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
2941+ case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
2942+ case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
2943+ case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
2944+ case GE: new_rhs = rhs - 1; new_code = GT;
2945+ try_new_rhs:
2946+ /* Convert e.g. OP0 > 4095 into OP0 >= 4096. */
2947+ if ((rhs < 0) == (new_rhs < 0)
2948+ && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
2949+ {
2950+ *op1 = GEN_INT (new_rhs);
2951+ *code = new_code;
2952+ }
2953+ break;
2954+
2955+ case EQ:
2956+ case NE:
2957+ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
2958+ if (SMALL_OPERAND (-rhs))
2959+ {
2960+ *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
2961+ riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
2962+ *op1 = const0_rtx;
2963+ }
2964+ default:
2965+ break;
2966+ }
2967+ }
2968+
2969+ if (*op1 != const0_rtx)
2970+ *op1 = force_reg (GET_MODE (cmp_op0), *op1);
2971+ }
2972+ else
2973+ {
2974+ /* For FP comparisons, set an integer register with the result of the
2975+ comparison, then branch on it. */
2976+ rtx tmp0, tmp1, final_op;
2977+ enum rtx_code fp_code = *code;
2978+ *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
2979+
2980+ switch (fp_code)
2981+ {
2982+ case ORDERED:
2983+ /* a == a && b == b */
2984+ tmp0 = gen_reg_rtx (SImode);
2985+ riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
2986+ tmp1 = gen_reg_rtx (SImode);
2987+ riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
2988+ final_op = gen_reg_rtx (SImode);
2989+ riscv_emit_binary (AND, final_op, tmp0, tmp1);
2990+ break;
2991+
2992+ case LTGT:
2993+ /* a < b || a > b */
2994+ tmp0 = gen_reg_rtx (SImode);
2995+ riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
2996+ tmp1 = gen_reg_rtx (SImode);
2997+ riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
2998+ final_op = gen_reg_rtx (SImode);
2999+ riscv_emit_binary (IOR, final_op, tmp0, tmp1);
3000+ break;
3001+
3002+ case EQ:
3003+ case LE:
3004+ case LT:
3005+ case GE:
3006+ case GT:
3007+ /* We have instructions for these cases. */
3008+ final_op = gen_reg_rtx (SImode);
3009+ riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
3010+ break;
3011+
3012+ default:
3013+ gcc_unreachable ();
3014+ }
3015+
3016+ /* Compare the binary result against 0. */
3017+ *op0 = final_op;
3018+ *op1 = const0_rtx;
3019+ }
3020+}
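+
+/* FP sketch for the code above (register names are examples only):
+   ORDERED (a, b) is computed as (a == a) && (b == b), roughly
+       feq.s t0,fa0,fa0
+       feq.s t1,fa1,fa1
+       and   t0,t0,t1
+   and the branch then tests t0 against zero using the NE code chosen
+   above.  */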
3021+
3022+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
3023+ and OPERANDS[3]. Store the result in OPERANDS[0].
3024+
3025+ On 64-bit targets, the mode of the comparison and target will always be
3026+ SImode, thus possibly narrower than that of the comparison's operands. */
3027+
3028+void
3029+riscv_expand_scc (rtx operands[])
3030+{
3031+ rtx target = operands[0];
3032+ enum rtx_code code = GET_CODE (operands[1]);
3033+ rtx op0 = operands[2];
3034+ rtx op1 = operands[3];
3035+
3036+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
3037+
3038+ if (code == EQ || code == NE)
3039+ {
3040+ rtx zie = riscv_zero_if_equal (op0, op1);
3041+ riscv_emit_binary (code, target, zie, const0_rtx);
3042+ }
3043+ else
3044+ riscv_emit_int_order_test (code, 0, target, op0, op1);
3045+}
3046+
3047+/* Compare OPERANDS[1] with OPERANDS[2] using the comparison code
3048+   in OPERANDS[0] and jump to OPERANDS[3] if the condition holds. */
3049+
3050+void
3051+riscv_expand_conditional_branch (rtx *operands)
3052+{
3053+ enum rtx_code code = GET_CODE (operands[0]);
3054+ rtx op0 = operands[1];
3055+ rtx op1 = operands[2];
3056+ rtx condition;
3057+
3058+ riscv_emit_compare (&code, &op0, &op1);
3059+ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3060+ emit_jump_insn (gen_condjump (condition, operands[3]));
3061+}
3062+
3063+/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
3064+ least PARM_BOUNDARY bits of alignment, but will be given anything up
3065+ to STACK_BOUNDARY bits if the type requires it. */
3066+
3067+static unsigned int
3068+riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
3069+{
3070+ unsigned int alignment;
3071+
3072+ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3073+ if (alignment < PARM_BOUNDARY)
3074+ alignment = PARM_BOUNDARY;
3075+ if (alignment > STACK_BOUNDARY)
3076+ alignment = STACK_BOUNDARY;
3077+ return alignment;
3078+}
3079+
3080+/* Fill INFO with information about a single argument. CUM is the
3081+ cumulative state for earlier arguments. MODE is the mode of this
3082+ argument and TYPE is its type (if known). NAMED is true if this
3083+ is a named (fixed) argument rather than a variable one. */
3084+
3085+static void
3086+riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
3087+ enum machine_mode mode, const_tree type, bool named)
3088+{
3089+ bool doubleword_aligned_p;
3090+ unsigned int num_bytes, num_words, max_regs;
3091+
3092+ /* Work out the size of the argument. */
3093+ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3094+ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3095+
3096+ /* Scalar, complex and vector floating-point types are passed in
3097+ floating-point registers, as long as this is a named rather
3098+ than a variable argument. */
3099+ info->fpr_p = (named
3100+ && (type == 0 || FLOAT_TYPE_P (type))
3101+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
3102+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3103+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3104+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3105+
3106+ /* Complex floats should only go into FPRs if there are two FPRs free,
3107+ otherwise they should be passed in the same way as a struct
3108+ containing two floats. */
3109+ if (info->fpr_p
3110+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3111+ && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3112+ {
3113+ if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3114+ info->fpr_p = false;
3115+ else
3116+ num_words = 2;
3117+ }
3118+
3119+ /* See whether the argument has doubleword alignment. */
3120+ doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
3121+ > BITS_PER_WORD);
3122+
3123+ /* Set REG_OFFSET to the register count we're interested in. */
3126+ info->reg_offset = cum->num_gprs;
3127+
3128+ /* Advance to an even register if the argument is doubleword-aligned. */
3129+ if (doubleword_aligned_p)
3130+ info->reg_offset += info->reg_offset & 1;
3131+
3132+ /* Work out the offset of a stack argument. */
3133+ info->stack_offset = cum->stack_words;
3134+ if (doubleword_aligned_p)
3135+ info->stack_offset += info->stack_offset & 1;
3136+
3137+ max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3138+
3139+ /* Partition the argument between registers and stack. */
3140+ info->reg_words = MIN (num_words, max_regs);
3141+ info->stack_words = num_words - info->reg_words;
3142+}
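+
+/* Partitioning example for the code above: assuming
+   MAX_ARGS_IN_REGISTERS is 8, a 3-word aggregate arriving when 7
+   argument words are already in use gets reg_words = 1 and
+   stack_words = 2, i.e. it is split between the last argument
+   register and the stack.  */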
3143+
3144+/* INFO describes a register argument that has the normal format for the
3145+ argument's mode. Return the register it uses, assuming that FPRs are
3146+ available if HARD_FLOAT_P. */
3147+
3148+static unsigned int
3149+riscv_arg_regno (const struct riscv_arg_info *info, bool hard_float_p)
3150+{
3151+ if (!info->fpr_p || !hard_float_p)
3152+ return GP_ARG_FIRST + info->reg_offset;
3153+ else
3154+ return FP_ARG_FIRST + info->reg_offset;
3155+}
3156+
3157+/* Implement TARGET_FUNCTION_ARG. */
3158+
3159+static rtx
3160+riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
3161+ const_tree type, bool named)
3162+{
3163+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
3164+ struct riscv_arg_info info;
3165+
3166+ if (mode == VOIDmode)
3167+ return NULL;
3168+
3169+ riscv_get_arg_info (&info, cum, mode, type, named);
3170+
3171+ /* Return straight away if the whole argument is passed on the stack. */
3172+ if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3173+ return NULL;
3174+
3175+ /* If any 64-bit chunk of the structure contains a double in its
3176+ entirety, then that 64-bit chunk is passed in a floating-point
3177+ register. */
3178+ if (TARGET_HARD_FLOAT
3179+ && named
3180+ && type != 0
3181+ && TREE_CODE (type) == RECORD_TYPE
3182+ && TYPE_SIZE_UNIT (type)
3183+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
3184+ {
3185+ tree field;
3186+
3187+ /* First check to see if there is any such field. */
3188+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
3189+ if (TREE_CODE (field) == FIELD_DECL
3190+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
3191+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3192+ && tree_fits_shwi_p (bit_position (field))
3193+ && int_bit_position (field) % BITS_PER_WORD == 0)
3194+ break;
3195+
3196+ if (field != 0)
3197+ {
3198+ /* Now handle the special case by returning a PARALLEL
3199+ indicating where each 64-bit chunk goes. INFO.REG_WORDS
3200+ chunks are passed in registers. */
3201+ unsigned int i;
3202+ HOST_WIDE_INT bitpos;
3203+ rtx ret;
3204+
3205+ /* assign_parms checks the mode of ENTRY_PARM, so we must
3206+ use the actual mode here. */
3207+ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3208+
3209+ bitpos = 0;
3210+ field = TYPE_FIELDS (type);
3211+ for (i = 0; i < info.reg_words; i++)
3212+ {
3213+ rtx reg;
3214+
3215+ for (; field; field = DECL_CHAIN (field))
3216+ if (TREE_CODE (field) == FIELD_DECL
3217+ && int_bit_position (field) >= bitpos)
3218+ break;
3219+
3220+ if (field
3221+ && int_bit_position (field) == bitpos
3222+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
3223+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3224+ reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3225+ else
3226+ reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3227+
3228+ XVECEXP (ret, 0, i)
3229+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
3230+ GEN_INT (bitpos / BITS_PER_UNIT));
3231+
3232+ bitpos += BITS_PER_WORD;
3233+ }
3234+ return ret;
3235+ }
3236+ }
3237+
3238+ /* Handle the convention for passing complex floating-point
3239+ arguments in FPR pairs. The real part goes in the lower register
3240+ and the imaginary part goes in the upper register. */
3241+ if (info.fpr_p
3242+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3243+ {
3244+ rtx real, imag;
3245+ enum machine_mode inner;
3246+ unsigned int regno;
3247+
3248+ inner = GET_MODE_INNER (mode);
3249+ regno = FP_ARG_FIRST + info.reg_offset;
3250+ if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
3251+ {
3252+ /* Real part in registers, imaginary part on stack. */
3253+ gcc_assert (info.stack_words == info.reg_words);
3254+ return gen_rtx_REG (inner, regno);
3255+ }
3256+ else
3257+ {
3258+ gcc_assert (info.stack_words == 0);
3259+ real = gen_rtx_EXPR_LIST (VOIDmode,
3260+ gen_rtx_REG (inner, regno),
3261+ const0_rtx);
3262+ imag = gen_rtx_EXPR_LIST (VOIDmode,
3263+ gen_rtx_REG (inner,
3264+ regno + info.reg_words / 2),
3265+ GEN_INT (GET_MODE_SIZE (inner)));
3266+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3267+ }
3268+ }
3269+
3270+ return gen_rtx_REG (mode, riscv_arg_regno (&info, TARGET_HARD_FLOAT));
3271+}
3272+
3273+/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
3274+
3275+static void
3276+riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
3277+ const_tree type, bool named)
3278+{
3279+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
3280+ struct riscv_arg_info info;
3281+
3282+ riscv_get_arg_info (&info, cum, mode, type, named);
3283+
3284+ /* Advance the register count. This has the effect of setting
3285+ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
3286+ argument required us to skip the final GPR and pass the whole
3287+ argument on the stack. */
3288+ cum->num_gprs = info.reg_offset + info.reg_words;
3289+
3290+ /* Advance the stack word count. */
3291+ if (info.stack_words > 0)
3292+ cum->stack_words = info.stack_offset + info.stack_words;
3293+}
3294+
3295+/* Implement TARGET_ARG_PARTIAL_BYTES. */
3296+
3297+static int
3298+riscv_arg_partial_bytes (cumulative_args_t cum,
3299+ enum machine_mode mode, tree type, bool named)
3300+{
3301+ struct riscv_arg_info info;
3302+
3303+ riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
3304+ return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3305+}
3306+
3307+/* See whether VALTYPE is a record whose fields should be returned in
3308+ floating-point registers. If so, return the number of fields and
3309+ list them in FIELDS (which should have two elements). Return 0
3310+ otherwise.
3311+
3312+ A structure with one or two fields is returned in
3313+ floating-point registers as long as every field has a floating-point
3314+ type. */
3315+
3316+static int
3317+riscv_fpr_return_fields (const_tree valtype, tree *fields)
3318+{
3319+ tree field;
3320+ int i;
3321+
3322+ if (TREE_CODE (valtype) != RECORD_TYPE)
3323+ return 0;
3324+
3325+ i = 0;
3326+ for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
3327+ {
3328+ if (TREE_CODE (field) != FIELD_DECL)
3329+ continue;
3330+
3331+ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
3332+ return 0;
3333+
3334+ if (i == 2)
3335+ return 0;
3336+
3337+ fields[i++] = field;
3338+ }
3339+ return i;
3340+}
3341+
3342+/* Return true if the function return value MODE will get returned in a
3343+ floating-point register. */
3344+
3345+static bool
3346+riscv_return_mode_in_fpr_p (enum machine_mode mode)
3347+{
3348+ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
3349+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
3350+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3351+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
3352+}
3353+
3354+/* Return the representation of an FPR return register when the
3355+ value being returned in FP_RETURN has mode VALUE_MODE and the
3356+ return type itself has mode TYPE_MODE. The two modes may be
3357+ different for structures like:
3358+
3359+ struct __attribute__((packed)) foo { float f; }
3360+
3361+ where we return the SFmode value of "f" in FP_RETURN, but where
3362+ the structure itself has mode BLKmode. */
3363+
3364+static rtx
3365+riscv_return_fpr_single (enum machine_mode type_mode,
3366+ enum machine_mode value_mode)
3367+{
3368+ rtx x;
3369+
3370+ x = gen_rtx_REG (value_mode, FP_RETURN);
3371+ if (type_mode != value_mode)
3372+ {
3373+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
3374+ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
3375+ }
3376+ return x;
3377+}
3378+
3379+/* Return a composite value in a pair of floating-point registers.
3380+ MODE1 and OFFSET1 are the mode and byte offset for the first value,
3381+ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
3382+ complete value.
3383+
3384+ FP_RETURN always holds the first value and FP_RETURN + 1 always
3385+ holds the second. */
3386+
3387+static rtx
3388+riscv_return_fpr_pair (enum machine_mode mode,
3389+ enum machine_mode mode1, HOST_WIDE_INT offset1,
3390+ enum machine_mode mode2, HOST_WIDE_INT offset2)
3391+{
3392+ return gen_rtx_PARALLEL
3393+ (mode,
3394+ gen_rtvec (2,
3395+ gen_rtx_EXPR_LIST (VOIDmode,
3396+ gen_rtx_REG (mode1, FP_RETURN),
3397+ GEN_INT (offset1)),
3398+ gen_rtx_EXPR_LIST (VOIDmode,
3399+ gen_rtx_REG (mode2, FP_RETURN + 1),
3400+ GEN_INT (offset2))));
3402+}
3403+
3404+/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
3405+ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
3406+ VALTYPE is null and MODE is the mode of the return value. */
3407+
3408+rtx
3409+riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
3410+{
3411+ if (valtype)
3412+ {
3413+ tree fields[2];
3414+ int unsigned_p;
3415+
3416+ mode = TYPE_MODE (valtype);
3417+ unsigned_p = TYPE_UNSIGNED (valtype);
3418+
3419+      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
3420+	 return values, promote the mode here too. */
3421+ mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
3422+
3423+      /* Handle structures whose fields are returned in FP registers. */
3424+ switch (riscv_fpr_return_fields (valtype, fields))
3425+ {
3426+ case 1:
3427+ return riscv_return_fpr_single (mode,
3428+ TYPE_MODE (TREE_TYPE (fields[0])));
3429+
3430+ case 2:
3431+ return riscv_return_fpr_pair (mode,
3432+ TYPE_MODE (TREE_TYPE (fields[0])),
3433+ int_byte_position (fields[0]),
3434+ TYPE_MODE (TREE_TYPE (fields[1])),
3435+ int_byte_position (fields[1]));
3436+ }
3437+
3438+ /* Only use FPRs for scalar, complex or vector types. */
3439+ if (!FLOAT_TYPE_P (valtype))
3440+ return gen_rtx_REG (mode, GP_RETURN);
3441+ }
3442+
3443+  /* Handle long doubles (TFmode). */
3444+ if (mode == TFmode)
3445+ return riscv_return_fpr_pair (mode,
3446+ DImode, 0,
3447+ DImode, GET_MODE_SIZE (mode) / 2);
3448+
3449+ if (riscv_return_mode_in_fpr_p (mode))
3450+ {
3451+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3452+ return riscv_return_fpr_pair (mode,
3453+ GET_MODE_INNER (mode), 0,
3454+ GET_MODE_INNER (mode),
3455+ GET_MODE_SIZE (mode) / 2);
3456+ else
3457+ return gen_rtx_REG (mode, FP_RETURN);
3458+ }
3459+
3460+ return gen_rtx_REG (mode, GP_RETURN);
3461+}
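+
+/* Source-level illustration (hypothetical type): a return value such as
+
+     struct pt { float x; double y; };
+
+   passes riscv_fpr_return_fields, so both members come back in the pair
+   of FP return registers; adding a non-floating-point member, or a third
+   field, falls back to the integer/memory path above.  */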
3462+
3463+/* Implement TARGET_RETURN_IN_MEMORY. Scalars and small structures
3464+ that fit in two registers are returned in a0/a1. */
3465+
3466+static bool
3467+riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
3468+{
3469+ return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
3470+}
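+
+/* Worked example (assuming 64-bit words, so the limit is 16 bytes):
+
+     struct s1 { long a, b; };        -- 16 bytes, returned in a0/a1
+     struct s2 { long a, b, c; };     -- 24 bytes, returned in memory
+
+   Types whose size is not a compile-time constant have
+   int_size_in_bytes == -1 and are returned in memory as well.  */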
3471+
3472+/* Implement TARGET_PASS_BY_REFERENCE. */
3473+
3474+static bool
3475+riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
3476+ enum machine_mode mode, const_tree type,
3477+ bool named ATTRIBUTE_UNUSED)
3478+{
3479+ if (type && riscv_return_in_memory (type, NULL_TREE))
3480+ return true;
3481+ return targetm.calls.must_pass_in_stack (mode, type);
3482+}
3483+
3484+/* Implement TARGET_SETUP_INCOMING_VARARGS. */
3485+
3486+static void
3487+riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
3488+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
3489+ int no_rtl)
3490+{
3491+ CUMULATIVE_ARGS local_cum;
3492+ int gp_saved;
3493+
3494+ /* The caller has advanced CUM up to, but not beyond, the last named
3495+ argument. Advance a local copy of CUM past the last "real" named
3496+ argument, to find out how many registers are left over. */
3497+ local_cum = *get_cumulative_args (cum);
3498+ riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
3499+
3500+  /* Find out how many registers we need to save. */
3501+ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3502+
3503+ if (!no_rtl && gp_saved > 0)
3504+ {
3505+ rtx ptr, mem;
3506+
3507+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
3508+ REG_PARM_STACK_SPACE (cfun->decl)
3509+ - gp_saved * UNITS_PER_WORD);
3510+ mem = gen_frame_mem (BLKmode, ptr);
3511+ set_mem_alias_set (mem, get_varargs_alias_set ());
3512+
3513+ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3514+ mem, gp_saved);
3515+ }
3516+ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
3517+ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
3518+}
3519+
3520+/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
3521+
3522+static void
3523+riscv_va_start (tree valist, rtx nextarg)
3524+{
3525+ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
3526+ std_expand_builtin_va_start (valist, nextarg);
3527+}
3528+
3529+/* Expand a call.  SIBCALL_P says whether the call is a sibling call.
3530+   RESULT is where the result will go (null for "call"s and
3531+   "sibcall"s), ADDR is the address of the function and ARGS_SIZE is
3532+   the size of the arguments.  Return the call itself. */
3533+
3534+rtx
3535+riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
3536+{
3537+ rtx pattern;
3538+
3539+ if (!call_insn_operand (addr, VOIDmode))
3540+ {
3541+ rtx reg = RISCV_EPILOGUE_TEMP (Pmode);
3542+ riscv_emit_move (reg, addr);
3543+ addr = reg;
3544+ }
3545+
3546+ if (result == 0)
3547+ {
3548+ rtx (*fn) (rtx, rtx);
3549+
3550+ if (sibcall_p)
3551+ fn = gen_sibcall_internal;
3552+ else
3553+ fn = gen_call_internal;
3554+
3555+ pattern = fn (addr, args_size);
3556+ }
3557+ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3558+ {
3559+ /* Handle return values created by riscv_return_fpr_pair. */
3560+ rtx (*fn) (rtx, rtx, rtx, rtx);
3561+ rtx reg1, reg2;
3562+
3563+ if (sibcall_p)
3564+ fn = gen_sibcall_value_multiple_internal;
3565+ else
3566+ fn = gen_call_value_multiple_internal;
3567+
3568+ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3569+ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3570+ pattern = fn (reg1, addr, args_size, reg2);
3571+ }
3572+ else
3573+ {
3574+ rtx (*fn) (rtx, rtx, rtx);
3575+
3576+ if (sibcall_p)
3577+ fn = gen_sibcall_value_internal;
3578+ else
3579+ fn = gen_call_value_internal;
3580+
3581+ /* Handle return values created by riscv_return_fpr_single. */
3582+ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
3583+ result = XEXP (XVECEXP (result, 0, 0), 0);
3584+ pattern = fn (result, addr, args_size);
3585+ }
3586+
3587+ return emit_call_insn (pattern);
3588+}
3589+
3590+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3591+ Assume that the areas do not overlap. */
3592+
3593+static void
3594+riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3595+{
3596+ HOST_WIDE_INT offset, delta;
3597+ unsigned HOST_WIDE_INT bits;
3598+ int i;
3599+ enum machine_mode mode;
3600+ rtx *regs;
3601+
3602+  bits = MAX (BITS_PER_UNIT,
3603+	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
3604+
3605+ mode = mode_for_size (bits, MODE_INT, 0);
3606+ delta = bits / BITS_PER_UNIT;
3607+
3608+ /* Allocate a buffer for the temporary registers. */
3609+ regs = XALLOCAVEC (rtx, length / delta);
3610+
3611+  /* Load as many BITS-sized chunks as possible; BITS is already
3612+     limited by the common alignment of SRC and DEST. */
3613+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3614+ {
3615+ regs[i] = gen_reg_rtx (mode);
3616+ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
3617+ }
3618+
3619+ /* Copy the chunks to the destination. */
3620+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3621+ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
3622+
3623+ /* Mop up any left-over bytes. */
3624+ if (offset < length)
3625+ {
3626+ src = adjust_address (src, BLKmode, offset);
3627+ dest = adjust_address (dest, BLKmode, offset);
3628+ move_by_pieces (dest, src, length - offset,
3629+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3630+ }
3631+}
3632+
3633+/* Helper function for doing a loop-based block operation on memory
3634+ reference MEM. Each iteration of the loop will operate on LENGTH
3635+ bytes of MEM.
3636+
3637+ Create a new base register for use within the loop and point it to
3638+ the start of MEM. Create a new memory reference that uses this
3639+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3640+
3641+static void
3642+riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3643+ rtx *loop_reg, rtx *loop_mem)
3644+{
3645+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3646+
3647+ /* Although the new mem does not refer to a known location,
3648+ it does keep up to LENGTH bytes of alignment. */
3649+ *loop_mem = change_address (mem, BLKmode, *loop_reg);
3650+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3651+}
3652+
3653+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
3654+ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
3655+ the memory regions do not overlap. */
3656+
3657+static void
3658+riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
3659+ HOST_WIDE_INT bytes_per_iter)
3660+{
3661+ rtx label, src_reg, dest_reg, final_src, test;
3662+ HOST_WIDE_INT leftover;
3663+
3664+ leftover = length % bytes_per_iter;
3665+ length -= leftover;
3666+
3667+ /* Create registers and memory references for use within the loop. */
3668+ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
3669+ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
3670+
3671+ /* Calculate the value that SRC_REG should have after the last iteration
3672+ of the loop. */
3673+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3674+ 0, 0, OPTAB_WIDEN);
3675+
3676+ /* Emit the start of the loop. */
3677+ label = gen_label_rtx ();
3678+ emit_label (label);
3679+
3680+ /* Emit the loop body. */
3681+ riscv_block_move_straight (dest, src, bytes_per_iter);
3682+
3683+ /* Move on to the next block. */
3684+ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
3685+ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
3686+
3687+ /* Emit the loop condition. */
3688+ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
3689+ if (Pmode == DImode)
3690+ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
3691+ else
3692+ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
3693+
3694+ /* Mop up any left-over bytes. */
3695+ if (leftover)
3696+ riscv_block_move_straight (dest, src, leftover);
3697+}
3698+
3699+/* Expand a movmemsi instruction, which copies LENGTH bytes from
3700+ memory reference SRC to memory reference DEST. */
3701+
3702+bool
3703+riscv_expand_block_move (rtx dest, rtx src, rtx length)
3704+{
3705+ if (CONST_INT_P (length))
3706+ {
3707+ HOST_WIDE_INT factor, align;
3708+
3709+ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
3710+ factor = BITS_PER_WORD / align;
3711+
3712+ if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
3713+ {
3714+ riscv_block_move_straight (dest, src, INTVAL (length));
3715+ return true;
3716+ }
3717+ else if (optimize && align >= BITS_PER_WORD)
3718+ {
3719+ riscv_block_move_loop (dest, src, INTVAL (length),
3720+ RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
3721+ return true;
3722+ }
3723+ }
3724+ return false;
3725+}
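+
+/* Sketch of the resulting strategy (illustrative, assuming word-aligned
+   operands so that factor == 1): constant lengths up to
+   RISCV_MAX_MOVE_BYTES_STRAIGHT become a flat sequence of word-sized
+   loads and stores via riscv_block_move_straight; longer constant
+   lengths use riscv_block_move_loop when optimizing; everything else
+   returns false and falls back to the generic block-move code
+   (typically a call to memcpy).  */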
3726+
3727+/* (Re-)Initialize riscv_lo_relocs and riscv_hi_relocs. */
3728+
3729+static void
3730+riscv_init_relocs (void)
3731+{
3732+ memset (riscv_hi_relocs, '\0', sizeof (riscv_hi_relocs));
3733+ memset (riscv_lo_relocs, '\0', sizeof (riscv_lo_relocs));
3734+
3735+ if (!flag_pic)
3736+ {
3737+ riscv_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
3738+ riscv_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
3739+ }
3740+
3741+ if (!flag_pic || flag_pie)
3742+ {
3743+ riscv_hi_relocs[SYMBOL_TLS_LE] = "%tprel_hi(";
3744+ riscv_lo_relocs[SYMBOL_TLS_LE] = "%tprel_lo(";
3745+ }
3746+}
3747+
3748+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
3749+ in context CONTEXT. RELOCS is the array of relocations to use. */
3750+
3751+static void
3752+riscv_print_operand_reloc (FILE *file, rtx op, const char **relocs)
3753+{
3754+ enum riscv_symbol_type symbol_type;
3755+ const char *p;
3756+
3757+ symbol_type = riscv_classify_symbolic_expression (op);
3758+ gcc_assert (relocs[symbol_type]);
3759+
3760+ fputs (relocs[symbol_type], file);
3761+ output_addr_const (file, riscv_strip_unspec_address (op));
3762+ for (p = relocs[symbol_type]; *p != 0; p++)
3763+ if (*p == '(')
3764+ fputc (')', file);
3765+}
3766+
3767+static const char *
3768+riscv_memory_model_suffix (enum memmodel model)
3769+{
3770+ switch (model)
3771+ {
3772+ case MEMMODEL_ACQ_REL:
3773+ case MEMMODEL_SEQ_CST:
3774+ return ".sc";
3775+ case MEMMODEL_ACQUIRE:
3776+ case MEMMODEL_CONSUME:
3777+ return ".aq";
3778+ case MEMMODEL_RELEASE:
3779+ return ".rl";
3780+ case MEMMODEL_RELAXED:
3781+ return "";
3782+ default: gcc_unreachable();
3783+ }
3784+}
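+
+/* Example of the suffix in use (the operand index below is hypothetical):
+   an atomic pattern whose template contains "%A2" prints ".aq" for a
+   __ATOMIC_ACQUIRE memory-model operand, so e.g. "amoswap.w%A2" becomes
+   "amoswap.w.aq".  */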
3785+
3786+/* Implement TARGET_PRINT_OPERAND. The RISCV-specific operand codes are:
3787+
3788+ 'h' Print the high-part relocation associated with OP, after stripping
3789+ any outermost HIGH.
3790+ 'R' Print the low-part relocation associated with OP.
3791+ 'C' Print the integer branch condition for comparison OP.
3792+ 'A' Print the atomic operation suffix for memory model OP.
3793+   'y'	Print the base register of memory reference OP.
3794+   'z'	Print the zero register if OP is zero, otherwise print OP normally. */
3794+
3795+static void
3796+riscv_print_operand (FILE *file, rtx op, int letter)
3797+{
3798+ enum rtx_code code;
3799+
3800+ gcc_assert (op);
3801+ code = GET_CODE (op);
3802+
3803+ switch (letter)
3804+ {
3805+ case 'h':
3806+ if (code == HIGH)
3807+ op = XEXP (op, 0);
3808+ riscv_print_operand_reloc (file, op, riscv_hi_relocs);
3809+ break;
3810+
3811+ case 'R':
3812+ riscv_print_operand_reloc (file, op, riscv_lo_relocs);
3813+ break;
3814+
3815+ case 'C':
3816+ /* The RTL names match the instruction names. */
3817+ fputs (GET_RTX_NAME (code), file);
3818+ break;
3819+
3820+ case 'A':
3821+ fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
3822+ break;
3823+
3824+ default:
3825+ switch (code)
3826+ {
3827+ case REG:
3828+ if (letter && letter != 'z')
3829+ output_operand_lossage ("invalid use of '%%%c'", letter);
3830+ fprintf (file, "%s", reg_names[REGNO (op)]);
3831+ break;
3832+
3833+ case MEM:
3834+ if (letter == 'y')
3835+	fprintf (file, "%s", reg_names[REGNO (XEXP (op, 0))]);
3836+ else if (letter && letter != 'z')
3837+ output_operand_lossage ("invalid use of '%%%c'", letter);
3838+ else
3839+ output_address (XEXP (op, 0));
3840+ break;
3841+
3842+ default:
3843+ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
3844+ fputs (reg_names[GP_REG_FIRST], file);
3845+ else if (letter && letter != 'z')
3846+ output_operand_lossage ("invalid use of '%%%c'", letter);
3847+ else
3848+ output_addr_const (file, riscv_strip_unspec_address (op));
3849+ break;
3850+ }
3851+ }
3852+}
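+
+/* Example of the 'z' code (the template below is hypothetical): writing
+   "sw\t%z1,%0" in a store pattern prints the zero register when operand
+   1 is constant zero, avoiding a separate move of 0 into a GPR.  */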
3853+
3854+/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
3855+
3856+static void
3857+riscv_print_operand_address (FILE *file, rtx x)
3858+{
3859+ struct riscv_address_info addr;
3860+
3861+ if (riscv_classify_address (&addr, x, word_mode, true))
3862+ switch (addr.type)
3863+ {
3864+ case ADDRESS_REG:
3865+ riscv_print_operand (file, addr.offset, 0);
3866+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
3867+ return;
3868+
3869+ case ADDRESS_LO_SUM:
3870+ riscv_print_operand_reloc (file, addr.offset, riscv_lo_relocs);
3871+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
3872+ return;
3873+
3874+ case ADDRESS_CONST_INT:
3875+ output_addr_const (file, x);
3876+ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
3877+ return;
3878+
3879+ case ADDRESS_SYMBOLIC:
3880+ output_addr_const (file, riscv_strip_unspec_address (x));
3881+ return;
3882+ }
3883+ gcc_unreachable ();
3884+}
3885+
3886+static bool
3887+riscv_size_ok_for_small_data_p (int size)
3888+{
3889+ return g_switch_value && IN_RANGE (size, 1, g_switch_value);
3890+}
3891+
3892+/* Return true if EXP should be placed in the small data section. */
3893+
3894+static bool
3895+riscv_in_small_data_p (const_tree x)
3896+{
3897+ if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
3898+ return false;
3899+
3900+ if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
3901+ {
3902+ const char *sec = TREE_STRING_POINTER (DECL_SECTION_NAME (x));
3903+ return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
3904+ }
3905+
3906+ return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
3907+}
3908+
3909+/* Return a section for X, handling small data. */
3910+
3911+static section *
3912+riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
3913+ unsigned HOST_WIDE_INT align)
3914+{
3915+ section *s = default_elf_select_rtx_section (mode, x, align);
3916+
3917+ if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
3918+ {
3919+ if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
3920+ {
3921+ /* Rename .rodata.cst* to .srodata.cst*. */
3922+ char name[32];
3923+ sprintf (name, ".s%s", s->named.name + 1);
3924+ return get_section (name, s->named.common.flags, NULL);
3925+ }
3926+
3927+ if (s == data_section)
3928+ return sdata_section;
3929+ }
3930+
3931+ return s;
3932+}
3933+
3934+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
3935+
3936+static void ATTRIBUTE_UNUSED
3937+riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
3938+{
3939+ switch (size)
3940+ {
3941+ case 4:
3942+ fputs ("\t.dtprelword\t", file);
3943+ break;
3944+
3945+ case 8:
3946+ fputs ("\t.dtpreldword\t", file);
3947+ break;
3948+
3949+ default:
3950+ gcc_unreachable ();
3951+ }
3952+ output_addr_const (file, x);
3953+ fputs ("+0x800", file);
3954+}
3955+
3956+/* Make the last instruction frame-related and note that it performs
3957+ the operation described by FRAME_PATTERN. */
3958+
3959+static void
3960+riscv_set_frame_expr (rtx frame_pattern)
3961+{
3962+ rtx insn;
3963+
3964+ insn = get_last_insn ();
3965+ RTX_FRAME_RELATED_P (insn) = 1;
3966+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3967+ frame_pattern,
3968+ REG_NOTES (insn));
3969+}
3970+
3971+/* Return a frame-related rtx that stores REG at MEM.
3972+ REG must be a single register. */
3973+
3974+static rtx
3975+riscv_frame_set (rtx mem, rtx reg)
3976+{
3977+ rtx set;
3978+
3979+ set = gen_rtx_SET (VOIDmode, mem, reg);
3980+ RTX_FRAME_RELATED_P (set) = 1;
3981+
3982+ return set;
3983+}
3984+
3985+/* Return true if the current function must save register REGNO. */
3986+
3987+static bool
3988+riscv_save_reg_p (unsigned int regno)
3989+{
3990+ bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
3991+ bool might_clobber = crtl->saves_all_registers
3992+ || df_regs_ever_live_p (regno)
3993+ || (regno == HARD_FRAME_POINTER_REGNUM
3994+ && frame_pointer_needed);
3995+
3996+ return (call_saved && might_clobber)
3997+ || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
3998+}
3999+
4000+/* Populate the current function's riscv_frame_info structure.
4001+
4002+   RISC-V stack frames grow downward.  High addresses are at the top.
4003+
4004+ +-------------------------------+
4005+ | |
4006+ | incoming stack arguments |
4007+ | |
4008+ +-------------------------------+ <-- incoming stack pointer
4009+ | |
4010+ | callee-allocated save area |
4011+ | for arguments that are |
4012+ | split between registers and |
4013+ | the stack |
4014+ | |
4015+ +-------------------------------+ <-- arg_pointer_rtx
4016+ | |
4017+ | callee-allocated save area |
4018+ | for register varargs |
4019+ | |
4020+ +-------------------------------+ <-- hard_frame_pointer_rtx;
4021+ | | stack_pointer_rtx + gp_sp_offset
4022+ | GPR save area | + UNITS_PER_WORD
4023+ | |
4024+ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
4025+	|				 |       + UNITS_PER_HWFPVALUE
4026+ | FPR save area |
4027+ | |
4028+ +-------------------------------+ <-- frame_pointer_rtx (virtual)
4029+ | |
4030+ | local variables |
4031+ | |
4032+ P +-------------------------------+
4033+ | |
4034+ | outgoing stack arguments |
4035+ | |
4036+ +-------------------------------+ <-- stack_pointer_rtx
4037+
4038+ Dynamic stack allocations such as alloca insert data at point P.
4039+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
4040+ hard_frame_pointer_rtx unchanged. */
4041+
4042+static void
4043+riscv_compute_frame_info (void)
4044+{
4045+ struct riscv_frame_info *frame;
4046+ HOST_WIDE_INT offset;
4047+ unsigned int regno, i;
4048+
4049+ frame = &cfun->machine->frame;
4050+ memset (frame, 0, sizeof (*frame));
4051+
4052+ /* Find out which GPRs we need to save. */
4053+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
4054+ if (riscv_save_reg_p (regno))
4055+ frame->mask |= 1 << (regno - GP_REG_FIRST);
4056+
4057+ /* If this function calls eh_return, we must also save and restore the
4058+ EH data registers. */
4059+ if (crtl->calls_eh_return)
4060+ for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
4061+ frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
4062+
4063+ /* Find out which FPRs we need to save. This loop must iterate over
4064+ the same space as its companion in riscv_for_each_saved_gpr_and_fpr. */
4065+ if (TARGET_HARD_FLOAT)
4066+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4067+ if (riscv_save_reg_p (regno))
4068+ frame->fmask |= 1 << (regno - FP_REG_FIRST);
4069+
4070+ /* At the bottom of the frame are any outgoing stack arguments. */
4071+ offset = crtl->outgoing_args_size;
4072+ /* Next are local stack variables. */
4073+ offset += RISCV_STACK_ALIGN (get_frame_size ());
4074+ /* The virtual frame pointer points above the local variables. */
4075+ frame->frame_pointer_offset = offset;
4076+ /* Next are the callee-saved FPRs. */
4077+ if (frame->fmask)
4078+ {
4079+ unsigned num_saved = __builtin_popcount(frame->fmask);
4080+ offset += RISCV_STACK_ALIGN (num_saved * UNITS_PER_FPREG);
4081+ frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
4082+ }
4083+ /* Next are the callee-saved GPRs. */
4084+ if (frame->mask)
4085+ {
4086+ unsigned num_saved = __builtin_popcount(frame->mask);
4087+ offset += RISCV_STACK_ALIGN (num_saved * UNITS_PER_WORD);
4088+ frame->gp_sp_offset = offset - UNITS_PER_WORD;
4089+ }
4090+ /* The hard frame pointer points above the callee-saved GPRs. */
4091+ frame->hard_frame_pointer_offset = offset;
4092+  /* Above the hard frame pointer is the callee-allocated varargs save area. */
4093+ offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
4094+ frame->arg_pointer_offset = offset;
4095+ /* Next is the callee-allocated area for pretend stack arguments. */
4096+ offset += crtl->args.pretend_args_size;
4097+ frame->total_size = offset;
4098+  /* Above this are the incoming stack pointer and any incoming arguments. */
4099+}
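+
+/* Worked example (assumes RV64 and that RISCV_STACK_ALIGN rounds up to
+   16 bytes): a function with no outgoing arguments, 16 bytes of locals
+   and ra/s0 to save gets
+
+     frame_pointer_offset      = 16
+     gp_sp_offset              = 32 - UNITS_PER_WORD = 24
+     hard_frame_pointer_offset = 32
+     total_size                = 32
+
+   so after the prologue drops the stack pointer by 32 bytes, the two
+   saved GPRs live at sp+24 and sp+16.  */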
4100+
4101+/* Make sure that we're not trying to eliminate to the wrong hard frame
4102+ pointer. */
4103+
4104+static bool
4105+riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
4106+{
4107+ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
4108+}
4109+
4110+/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
4111+ or argument pointer. TO is either the stack pointer or hard frame
4112+ pointer. */
4113+
4114+HOST_WIDE_INT
4115+riscv_initial_elimination_offset (int from, int to)
4116+{
4117+ HOST_WIDE_INT src, dest;
4118+
4119+ riscv_compute_frame_info ();
4120+
4121+ if (to == HARD_FRAME_POINTER_REGNUM)
4122+ dest = cfun->machine->frame.hard_frame_pointer_offset;
4123+ else if (to == STACK_POINTER_REGNUM)
4124+ dest = 0; /* this is the base of all offsets */
4125+ else
4126+ gcc_unreachable ();
4127+
4128+ if (from == FRAME_POINTER_REGNUM)
4129+ src = cfun->machine->frame.frame_pointer_offset;
4130+ else if (from == ARG_POINTER_REGNUM)
4131+ src = cfun->machine->frame.arg_pointer_offset;
4132+ else
4133+ gcc_unreachable ();
4134+
4135+ return src - dest;
4136+}
4137+
4138+/* Implement RETURN_ADDR_RTX. We do not support moving back to a
4139+ previous frame. */
4140+
4141+rtx
4142+riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4143+{
4144+ if (count != 0)
4145+ return const0_rtx;
4146+
4147+ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
4148+}
4149+
4150+/* Emit code to change the current function's return address to
4151+ ADDRESS. SCRATCH is available as a scratch register, if needed.
4152+ ADDRESS and SCRATCH are both word-mode GPRs. */
4153+
4154+void
4155+riscv_set_return_address (rtx address, rtx scratch)
4156+{
4157+ rtx slot_address;
4158+
4159+ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
4160+ slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
4161+ cfun->machine->frame.gp_sp_offset);
4162+ riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
4163+}
4164+
4165+/* A function to save or restore a register.  The first argument is the
4166+   register and the second is the stack slot. */
4167+typedef void (*riscv_save_restore_fn) (rtx, rtx);
4168+
4169+/* Use FN to save or restore register REGNO. MODE is the register's
4170+ mode and OFFSET is the offset of its save slot from the current
4171+ stack pointer. */
4172+
4173+static void
4174+riscv_save_restore_reg (enum machine_mode mode, int regno,
4175+ HOST_WIDE_INT offset, riscv_save_restore_fn fn)
4176+{
4177+ rtx mem;
4178+
4179+ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
4180+ fn (gen_rtx_REG (mode, regno), mem);
4181+}
4182+
4183+/* Call FN for each register that is saved by the current function.
4184+ SP_OFFSET is the offset of the current stack pointer from the start
4185+ of the frame. */
4186+
4187+static void
4188+riscv_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
4189+ riscv_save_restore_fn fn)
4190+{
4191+ HOST_WIDE_INT offset;
4192+ int regno;
4193+
4194+ /* Save the link register and s-registers. */
4195+ offset = cfun->machine->frame.gp_sp_offset - sp_offset;
4196+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
4197+ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
4198+ {
4199+ riscv_save_restore_reg (word_mode, regno, offset, fn);
4200+ offset -= UNITS_PER_WORD;
4201+ }
4202+
4203+ /* This loop must iterate over the same space as its companion in
4204+ riscv_compute_frame_info. */
4205+ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
4206+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4207+ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
4208+ {
4209+ riscv_save_restore_reg (DFmode, regno, offset, fn);
4210+ offset -= GET_MODE_SIZE (DFmode);
4211+ }
4212+}
4213+
4214+/* Emit a move from SRC to DEST, given that one of them is a register
4215+ save slot and that the other is a register. TEMP is a temporary
4216+ GPR of the same mode that is available if need be. */
4217+
4218+static void
4219+riscv_emit_save_slot_move (rtx dest, rtx src, rtx temp)
4220+{
4221+ unsigned int regno;
4222+ rtx mem;
4223+ enum reg_class rclass;
4224+
4225+ if (REG_P (src))
4226+ {
4227+ regno = REGNO (src);
4228+ mem = dest;
4229+ }
4230+ else
4231+ {
4232+ regno = REGNO (dest);
4233+ mem = src;
4234+ }
4235+
4236+ rclass = riscv_secondary_reload_class (REGNO_REG_CLASS (regno),
4237+ GET_MODE (mem), mem, mem == src);
4238+
4239+ if (rclass == NO_REGS)
4240+ riscv_emit_move (dest, src);
4241+ else
4242+ {
4243+ gcc_assert (!reg_overlap_mentioned_p (dest, temp));
4244+ riscv_emit_move (temp, src);
4245+ riscv_emit_move (dest, temp);
4246+ }
4247+ if (MEM_P (dest))
4248+ riscv_set_frame_expr (riscv_frame_set (dest, src));
4249+}
4250+
4251+/* Save register REG to MEM. Make the instruction frame-related. */
4252+
4253+static void
4254+riscv_save_reg (rtx reg, rtx mem)
4255+{
4256+ riscv_emit_save_slot_move (mem, reg, RISCV_PROLOGUE_TEMP (GET_MODE (reg)));
4257+}
4258+
4259+
4260+/* Expand the "prologue" pattern. */
4261+
4262+void
4263+riscv_expand_prologue (void)
4264+{
4265+ const struct riscv_frame_info *frame;
4266+ HOST_WIDE_INT size;
4267+ rtx insn;
4268+
4269+ frame = &cfun->machine->frame;
4270+ size = frame->total_size;
4271+
4272+ if (flag_stack_usage_info)
4273+ current_function_static_stack_size = size;
4274+
4275+ /* Save the registers. Allocate up to RISCV_MAX_FIRST_STACK_STEP
4276+ bytes beforehand; this is enough to cover the register save area
4277+ without going out of range. */
4278+ if ((frame->mask | frame->fmask) != 0)
4279+ {
4280+ HOST_WIDE_INT step1;
4281+
4282+ step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
4283+ insn = gen_add3_insn (stack_pointer_rtx,
4284+ stack_pointer_rtx,
4285+ GEN_INT (-step1));
4286+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
4287+ size -= step1;
4288+ riscv_for_each_saved_gpr_and_fpr (size, riscv_save_reg);
4289+ }
4290+
4291+ /* Set up the frame pointer, if we're using one. */
4292+ if (frame_pointer_needed)
4293+ {
4294+ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
4295+ GEN_INT (frame->hard_frame_pointer_offset - size));
4296+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
4297+ }
4298+
4299+ /* Allocate the rest of the frame. */
4300+ if (size > 0)
4301+ {
4302+ if (SMALL_OPERAND (-size))
4303+ RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
4304+ stack_pointer_rtx,
4305+ GEN_INT (-size)))) = 1;
4306+ else
4307+ {
4308+ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (size));
4309+ emit_insn (gen_sub3_insn (stack_pointer_rtx,
4310+ stack_pointer_rtx,
4311+ RISCV_PROLOGUE_TEMP (Pmode)));
4312+
4313+ /* Describe the combined effect of the previous instructions. */
4314+ riscv_set_frame_expr
4315+ (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
4316+ plus_constant (Pmode, stack_pointer_rtx, -size)));
4317+ }
4318+ }
4319+}
4320+
4321+/* Emit instructions to restore register REG from slot MEM. */
4322+
4323+static void
4324+riscv_restore_reg (rtx reg, rtx mem)
4325+{
4326+ riscv_emit_save_slot_move (reg, mem, RISCV_EPILOGUE_TEMP (GET_MODE (reg)));
4327+}
4328+
4329+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
4330+ says which. */
4331+
4332+void
4333+riscv_expand_epilogue (bool sibcall_p)
4334+{
4335+ const struct riscv_frame_info *frame;
4336+ HOST_WIDE_INT step1, step2;
4337+
4338+ if (!sibcall_p && riscv_can_use_return_insn ())
4339+ {
4340+ emit_jump_insn (gen_return ());
4341+ return;
4342+ }
4343+
4344+ /* Split the frame into two. STEP1 is the amount of stack we should
4345+ deallocate before restoring the registers. STEP2 is the amount we
4346+ should deallocate afterwards.
4347+
4348+ Start off by assuming that no registers need to be restored. */
4349+ frame = &cfun->machine->frame;
4350+ step1 = frame->total_size;
4351+ step2 = 0;
4352+
4353+ /* Move past any dynamic stack allocations. */
4354+ if (cfun->calls_alloca)
4355+ {
4356+ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
4357+ if (!SMALL_INT (adjust))
4358+ {
4359+ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
4360+ adjust = RISCV_EPILOGUE_TEMP (Pmode);
4361+ }
4362+
4363+ emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, adjust));
4364+ }
4365+
4366+ /* If we need to restore registers, deallocate as much stack as
4367+ possible in the second step without going out of range. */
4368+ if ((frame->mask | frame->fmask) != 0)
4369+ {
4370+ step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
4371+ step1 -= step2;
4372+ }
4373+
4374+  /* Deallocate the first STEP1 bytes of the frame. */
4375+  if (step1 > 0)
4376+    {
4377+      /* Get an rtx for STEP1 that we can add to the stack pointer. */
4378+ rtx adjust = GEN_INT (step1);
4379+ if (!SMALL_OPERAND (step1))
4380+ {
4381+ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
4382+ adjust = RISCV_EPILOGUE_TEMP (Pmode);
4383+ }
4384+
4385+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
4386+ }
4387+
4388+ /* Restore the registers. */
4389+ riscv_for_each_saved_gpr_and_fpr (frame->total_size - step2,
4390+ riscv_restore_reg);
4391+
4392+ /* Deallocate the final bit of the frame. */
4393+ if (step2 > 0)
4394+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
4395+ GEN_INT (step2)));
4396+
4397+ /* Add in the __builtin_eh_return stack adjustment. */
4398+ if (crtl->calls_eh_return)
4399+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
4400+ EH_RETURN_STACKADJ_RTX));
4401+
4402+ if (!sibcall_p)
4403+ {
4404+ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
4405+ emit_jump_insn (gen_simple_return_internal (ra));
4406+ }
4407+}
4408+
4409+/* Return nonzero if this function is known to have a null epilogue.
4410+ This allows the optimizer to omit jumps to jumps if no stack
4411+ was created. */
4412+
4413+bool
4414+riscv_can_use_return_insn (void)
4415+{
4416+ return reload_completed && cfun->machine->frame.total_size == 0;
4417+}
4418+
4419+/* Return true if register REGNO can store a value of mode MODE.
4420+ The result of this function is cached in riscv_hard_regno_mode_ok. */
4421+
4422+static bool
4423+riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
4424+{
4425+ unsigned int size = GET_MODE_SIZE (mode);
4426+ enum mode_class mclass = GET_MODE_CLASS (mode);
4427+
4428+ /* This is hella bogus but ira_build segfaults on RV32 without it. */
4429+ if (VECTOR_MODE_P (mode))
4430+ return true;
4431+
4432+ if (GP_REG_P (regno))
4433+ {
4434+ if (size <= UNITS_PER_WORD)
4435+ return true;
4436+
4437+ /* Double-word values must be even-register-aligned. */
4438+ if (size <= 2 * UNITS_PER_WORD)
4439+ return regno % 2 == 0;
4440+ }
4441+
4442+ if (FP_REG_P (regno))
4443+ {
4444+ if (mclass == MODE_FLOAT
4445+ || mclass == MODE_COMPLEX_FLOAT
4446+ || mclass == MODE_VECTOR_FLOAT)
4447+ return size <= UNITS_PER_FPVALUE;
4448+ }
4449+
4450+ return false;
4451+}
4452+
4453+/* Implement HARD_REGNO_NREGS. */
4454+
4455+unsigned int
4456+riscv_hard_regno_nregs (int regno, enum machine_mode mode)
4457+{
4458+ if (FP_REG_P (regno))
4459+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
4460+
4461+ /* All other registers are word-sized. */
4462+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4463+}
4464+
4465+/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
4466+ in riscv_hard_regno_nregs. */
4467+
4468+int
4469+riscv_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
4470+{
4471+ int size;
4472+ HARD_REG_SET left;
4473+
4474+ size = 0x8000;
4475+ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
4476+ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
4477+ {
4478+ size = MIN (size, UNITS_PER_FPREG);
4479+ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
4480+ }
4481+ if (!hard_reg_set_empty_p (left))
4482+ size = MIN (size, UNITS_PER_WORD);
4483+ return (GET_MODE_SIZE (mode) + size - 1) / size;
4484+}
4485+
4486+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
4487+
4488+static reg_class_t
4489+riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
4490+{
4491+ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
4492+ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
4493+ rclass;
4494+}
4495+
4496+/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
4497+ Return a "canonical" class to represent it in later calculations. */
4498+
4499+static reg_class_t
4500+riscv_canonicalize_move_class (reg_class_t rclass)
4501+{
4502+ if (reg_class_subset_p (rclass, GENERAL_REGS))
4503+ rclass = GENERAL_REGS;
4504+
4505+ return rclass;
4506+}
4507+
4508+/* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
4509+ maximum of the move costs for subclasses; regclass will work out
4510+ the maximum for us. */
4511+
4512+static int
4513+riscv_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
4514+ reg_class_t from, reg_class_t to)
4515+{
4516+ from = riscv_canonicalize_move_class (from);
4517+ to = riscv_canonicalize_move_class (to);
4518+
4519+ if ((from == GENERAL_REGS && to == GENERAL_REGS)
4520+ || (from == GENERAL_REGS && to == FP_REGS)
4521+ || (from == FP_REGS && to == FP_REGS))
4522+ return COSTS_N_INSNS (1);
4523+
4524+ if (from == FP_REGS && to == GENERAL_REGS)
4525+ return tune_info->fp_to_int_cost;
4526+
4527+ return 0;
4528+}
4529+
4530+/* Implement TARGET_MEMORY_MOVE_COST. */
4531+
4532+static int
4533+riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
4534+{
4535+ return (tune_info->memory_cost
4536+ + memory_move_secondary_cost (mode, rclass, in));
4537+}
4538+
4539+/* Return the register class required for a secondary register when
4540+ copying between one of the registers in RCLASS and value X, which
4541+ has mode MODE. X is the source of the move if IN_P, otherwise it
4542+ is the destination. Return NO_REGS if no secondary register is
4543+ needed. */
4544+
4545+enum reg_class
4546+riscv_secondary_reload_class (enum reg_class rclass,
4547+ enum machine_mode mode, rtx x,
4548+ bool in_p ATTRIBUTE_UNUSED)
4549+{
4550+ int regno;
4551+
4552+ regno = true_regnum (x);
4553+
4554+ if (reg_class_subset_p (rclass, FP_REGS))
4555+ {
4556+ if (MEM_P (x) && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
4557+ /* We can use flw/fld/fsw/fsd. */
4558+ return NO_REGS;
4559+
4560+ if (GP_REG_P (regno) || x == CONST0_RTX (mode))
4561+ /* We can use fmv or go through memory when mode > Pmode. */
4562+ return NO_REGS;
4563+
4564+ if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
4565+ /* We can force the constant to memory and use flw/fld. */
4566+ return NO_REGS;
4567+
4568+ if (FP_REG_P (regno))
4569+ /* We can use fmv.fmt. */
4570+ return NO_REGS;
4571+
4572+ /* Otherwise, we need to reload through an integer register. */
4573+ return GR_REGS;
4574+ }
4575+ if (FP_REG_P (regno))
4576+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
4577+
4578+ return NO_REGS;
4579+}
4580+
4581+/* Implement TARGET_MODE_REP_EXTENDED. */
4582+
4583+static int
4584+riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
4585+{
4586+ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
4587+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
4588+ return SIGN_EXTEND;
4589+
4590+ return UNKNOWN;
4591+}
4592+
4593+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
4594+
4595+static bool
4596+riscv_scalar_mode_supported_p (enum machine_mode mode)
4597+{
4598+ if (ALL_FIXED_POINT_MODE_P (mode)
4599+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
4600+ return true;
4601+
4602+ return default_scalar_mode_supported_p (mode);
4603+}
4604+
4605+/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
4606+ dependencies have no cost. */
4607+
4608+static int
4609+riscv_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
4610+ rtx dep ATTRIBUTE_UNUSED, int cost)
4611+{
4612+ if (REG_NOTE_KIND (link) != 0)
4613+ return 0;
4614+ return cost;
4615+}
4616+
4617+/* Return the number of instructions that can be issued per cycle. */
4618+
4619+static int
4620+riscv_issue_rate (void)
4621+{
4622+ return tune_info->issue_rate;
4623+}
4624+
4625+/* This structure describes a single built-in function. */
4626+struct riscv_builtin_description {
4627+ /* The code of the main .md file instruction. See riscv_builtin_type
4628+ for more information. */
4629+ enum insn_code icode;
4630+
4631+ /* The name of the built-in function. */
4632+ const char *name;
4633+
4634+ /* Specifies how the function should be expanded. */
4635+ enum riscv_builtin_type builtin_type;
4636+
4637+ /* The function's prototype. */
4638+ enum riscv_function_type function_type;
4639+
4640+ /* Whether the function is available. */
4641+ unsigned int (*avail) (void);
4642+};
4643+
4644+static unsigned int
4645+riscv_builtin_avail_riscv (void)
4646+{
4647+ return 1;
4648+}
4649+
4650+/* Construct a riscv_builtin_description from the given arguments.
4651+
4652+   INSN is the name of the associated instruction pattern, without the
4653+   leading CODE_FOR_.
4657+
4658+ NAME is the name of the function itself, without the leading
4659+ "__builtin_riscv_".
4660+
4661+ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
4662+
4663+ AVAIL is the name of the availability predicate, without the leading
4664+ riscv_builtin_avail_. */
4665+#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
4666+ { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME, \
4667+ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
4668+
4669+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
4670+ mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE and AVAIL
4671+ are as for RISCV_BUILTIN. */
4672+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
4673+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
4674+
4675+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
4676+ function mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE
4677+ and AVAIL are as for RISCV_BUILTIN. */
4678+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
4679+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
4680+ FUNCTION_TYPE, AVAIL)
4681+
4682+static const struct riscv_builtin_description riscv_builtins[] = {
4683+ DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
4684+};
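+
+/* Usage sketch: the single entry above makes the compiler accept
+
+     __builtin_riscv_nop ();
+
+   in user code, which riscv_expand_builtin expands directly to the "nop"
+   instruction pattern with no result register.  */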
4685+
4686+/* Index I is the function declaration for riscv_builtins[I], or null if the
4687+ function isn't defined on this target. */
4688+static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
4689+
4690+
4691+/* Source-level argument types. */
4692+#define RISCV_ATYPE_VOID void_type_node
4693+#define RISCV_ATYPE_INT integer_type_node
4694+#define RISCV_ATYPE_POINTER ptr_type_node
4695+#define RISCV_ATYPE_CPOINTER const_ptr_type_node
4696+
4697+/* Standard mode-based argument types. */
4698+#define RISCV_ATYPE_UQI unsigned_intQI_type_node
4699+#define RISCV_ATYPE_SI intSI_type_node
4700+#define RISCV_ATYPE_USI unsigned_intSI_type_node
4701+#define RISCV_ATYPE_DI intDI_type_node
4702+#define RISCV_ATYPE_UDI unsigned_intDI_type_node
4703+#define RISCV_ATYPE_SF float_type_node
4704+#define RISCV_ATYPE_DF double_type_node
4705+
4706+/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
4707+ their associated RISCV_ATYPEs. */
4708+#define RISCV_FTYPE_ATYPES1(A, B) \
4709+ RISCV_ATYPE_##A, RISCV_ATYPE_##B
4710+
4711+#define RISCV_FTYPE_ATYPES2(A, B, C) \
4712+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
4713+
4714+#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
4715+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
4716+
4717+#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
4718+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
4719+ RISCV_ATYPE_##E
4720+
4721+/* Return the function type associated with function prototype TYPE. */
4722+
4723+static tree
4724+riscv_build_function_type (enum riscv_function_type type)
4725+{
4726+ static tree types[(int) RISCV_MAX_FTYPE_MAX];
4727+
4728+ if (types[(int) type] == NULL_TREE)
4729+ switch (type)
4730+ {
4731+#define DEF_RISCV_FTYPE(NUM, ARGS) \
4732+ case RISCV_FTYPE_NAME##NUM ARGS: \
4733+ types[(int) type] \
4734+ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
4735+ NULL_TREE); \
4736+ break;
4737+#include "config/riscv/riscv-ftypes.def"
4738+#undef DEF_RISCV_FTYPE
4739+ default:
4740+ gcc_unreachable ();
4741+ }
4742+
4743+ return types[(int) type];
4744+}
4745+
4746+/* Implement TARGET_INIT_BUILTINS. */
4747+
4748+static void
4749+riscv_init_builtins (void)
4750+{
4751+ const struct riscv_builtin_description *d;
4752+ unsigned int i;
4753+
4754+ /* Iterate through all of the bdesc arrays, initializing all of the
4755+ builtin functions. */
4756+ for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
4757+ {
4758+ d = &riscv_builtins[i];
4759+ if (d->avail ())
4760+ riscv_builtin_decls[i]
4761+ = add_builtin_function (d->name,
4762+ riscv_build_function_type (d->function_type),
4763+ i, BUILT_IN_MD, NULL, NULL);
4764+ }
4765+}
4766+
4767+/* Implement TARGET_BUILTIN_DECL. */
4768+
4769+static tree
4770+riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
4771+{
4772+ if (code >= ARRAY_SIZE (riscv_builtins))
4773+ return error_mark_node;
4774+ return riscv_builtin_decls[code];
4775+}
4776+
4777+/* Take argument ARGNO from EXP's argument list and convert it into a
4778+ form suitable for input operand OPNO of instruction ICODE. Return the
4779+ value. */
4780+
4781+static rtx
4782+riscv_prepare_builtin_arg (enum insn_code icode,
4783+ unsigned int opno, tree exp, unsigned int argno)
4784+{
4785+ tree arg;
4786+ rtx value;
4787+ enum machine_mode mode;
4788+
4789+ arg = CALL_EXPR_ARG (exp, argno);
4790+ value = expand_normal (arg);
4791+ mode = insn_data[icode].operand[opno].mode;
4792+ if (!insn_data[icode].operand[opno].predicate (value, mode))
4793+ {
4794+ /* We need to get the mode from ARG for two reasons:
4795+
4796+ - to cope with address operands, where MODE is the mode of the
4797+ memory, rather than of VALUE itself.
4798+
4799+ - to cope with special predicates like pmode_register_operand,
4800+ where MODE is VOIDmode. */
4801+ value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
4802+
4803+ /* Check the predicate again. */
4804+ if (!insn_data[icode].operand[opno].predicate (value, mode))
4805+ {
4806+ error ("invalid argument to built-in function");
4807+ return const0_rtx;
4808+ }
4809+ }
4810+
4811+ return value;
4812+}
4813+
4814+/* Return an rtx suitable for output operand OP of instruction ICODE.
4815+ If TARGET is non-null, try to use it where possible. */
4816+
4817+static rtx
4818+riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
4819+{
4820+ enum machine_mode mode;
4821+
4822+ mode = insn_data[icode].operand[op].mode;
4823+ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
4824+ target = gen_reg_rtx (mode);
4825+
4826+ return target;
4827+}
4828+
4829+/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
4830+ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
4831+ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
4832+ suggests a good place to put the result. */
4833+
4834+static rtx
4835+riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
4836+ bool has_target_p)
4837+{
4838+ rtx ops[MAX_RECOG_OPERANDS];
4839+ int opno, argno;
4840+
4841+ /* Map any target to operand 0. */
4842+ opno = 0;
4843+ if (has_target_p)
4844+ {
4845+ target = riscv_prepare_builtin_target (icode, opno, target);
4846+ ops[opno] = target;
4847+ opno++;
4848+ }
4849+
4850+ /* Map the arguments to the other operands. The n_operands value
4851+ for an expander includes match_dups and match_scratches as well as
4852+ match_operands, so n_operands is only an upper bound on the number
4853+ of arguments to the expander function. */
4854+ gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
4855+ for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
4856+ ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
4857+
4858+ switch (opno)
4859+ {
4860+ case 2:
4861+ emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
4862+ break;
4863+
4864+ case 3:
4865+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
4866+ break;
4867+
4868+ case 4:
4869+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
4870+ break;
4871+
4872+ default:
4873+ gcc_unreachable ();
4874+ }
4875+ return target;
4876+}
4877+
4878+/* Implement TARGET_EXPAND_BUILTIN. */
4879+
4880+static rtx
4881+riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
4882+ enum machine_mode mode ATTRIBUTE_UNUSED,
4883+ int ignore ATTRIBUTE_UNUSED)
4884+{
4885+ tree fndecl;
4886+ unsigned int fcode, avail;
4887+ const struct riscv_builtin_description *d;
4888+
4889+ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
4890+ fcode = DECL_FUNCTION_CODE (fndecl);
4891+ gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
4892+ d = &riscv_builtins[fcode];
4893+ avail = d->avail ();
4894+ gcc_assert (avail != 0);
4895+ switch (d->builtin_type)
4896+ {
4897+ case RISCV_BUILTIN_DIRECT:
4898+ return riscv_expand_builtin_direct (d->icode, target, exp, true);
4899+
4900+ case RISCV_BUILTIN_DIRECT_NO_TARGET:
4901+ return riscv_expand_builtin_direct (d->icode, target, exp, false);
4902+ }
4903+ gcc_unreachable ();
4904+}
4905+
4906+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
4907+ in order to avoid duplicating too much logic from elsewhere. */
4908+
4909+static void
4910+riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
4911+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
4912+ tree function)
4913+{
4914+ rtx this_rtx, temp1, temp2, insn, fnaddr;
4915+ bool use_sibcall_p;
4916+
4917+ /* Pretend to be a post-reload pass while generating rtl. */
4918+ reload_completed = 1;
4919+
4920+ /* Mark the end of the (empty) prologue. */
4921+ emit_note (NOTE_INSN_PROLOGUE_END);
4922+
4923+ /* Determine if we can use a sibcall to call FUNCTION directly. */
4924+ fnaddr = XEXP (DECL_RTL (function), 0);
4925+ use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
4926+
4927+ /* We need two temporary registers in some cases. */
4928+ temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
4929+ temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
4930+
4931+ /* Find out which register contains the "this" pointer. */
4932+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
4933+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
4934+ else
4935+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
4936+
4937+ /* Add DELTA to THIS_RTX. */
4938+ if (delta != 0)
4939+ {
4940+ rtx offset = GEN_INT (delta);
4941+ if (!SMALL_OPERAND (delta))
4942+ {
4943+ riscv_emit_move (temp1, offset);
4944+ offset = temp1;
4945+ }
4946+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
4947+ }
4948+
4949+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
4950+ if (vcall_offset != 0)
4951+ {
4952+ rtx addr;
4953+
4954+ /* Set TEMP1 to *THIS_RTX. */
4955+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
4956+
4957+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
4958+ addr = riscv_add_offset (temp2, temp1, vcall_offset);
4959+
4960+ /* Load the offset and add it to THIS_RTX. */
4961+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
4962+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
4963+ }
4964+
4965+ /* Jump to the target function. Use a sibcall if direct jumps are
4966+ allowed, otherwise load the address into a register first. */
4967+ if (use_sibcall_p)
4968+ {
4969+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
4970+ SIBLING_CALL_P (insn) = 1;
4971+ }
4972+ else
4973+ {
4974+ riscv_emit_move(temp1, fnaddr);
4975+ emit_jump_insn (gen_indirect_jump (temp1));
4976+ }
4977+
4978+ /* Run just enough of rest_of_compilation. This sequence was
4979+ "borrowed" from alpha.c. */
4980+ insn = get_insns ();
4981+ split_all_insns_noflow ();
4982+ shorten_branches (insn);
4983+ final_start_function (insn, file, 1);
4984+ final (insn, file, 1);
4985+ final_end_function ();
4986+
4987+ /* Clean up the vars set above. Note that final_end_function resets
4988+ the global pointer for us. */
4989+ reload_completed = 0;
4990+}
4991+
4992+/* Allocate a chunk of memory for per-function machine-dependent data. */
4993+
4994+static struct machine_function *
4995+riscv_init_machine_status (void)
4996+{
4997+ return ggc_alloc_cleared_machine_function ();
4998+}
4999+
5000+/* Implement TARGET_OPTION_OVERRIDE. */
5001+
5002+static void
5003+riscv_option_override (void)
5004+{
5005+ int regno, mode;
5006+ const struct riscv_cpu_info *cpu;
5007+
5008+#ifdef SUBTARGET_OVERRIDE_OPTIONS
5009+ SUBTARGET_OVERRIDE_OPTIONS;
5010+#endif
5011+
5012+ flag_pcc_struct_return = 0;
5013+
5014+ if (flag_pic)
5015+ g_switch_value = 0;
5016+
5017+ /* Prefer a call to memcpy over inline code when optimizing for size,
5018+ though see MOVE_RATIO in riscv.h. */
5019+ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
5020+ target_flags |= MASK_MEMCPY;
5021+
5022+ /* Handle -mtune. */
5023+ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
5024+ RISCV_TUNE_STRING_DEFAULT);
5025+ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
5026+
5027+ /* If the user hasn't specified a branch cost, use the processor's
5028+ default. */
5029+ if (riscv_branch_cost == 0)
5030+ riscv_branch_cost = tune_info->branch_cost;
5031+
5032+ /* Set up riscv_hard_regno_mode_ok. */
5033+ for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
5034+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5035+ riscv_hard_regno_mode_ok[mode][regno]
5036+ = riscv_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
5037+
5038+ /* Function to allocate machine-dependent function status. */
5039+ init_machine_status = &riscv_init_machine_status;
5040+
5041+ riscv_init_relocs ();
5042+}
5043+
5044+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
5045+
5046+static void
5047+riscv_conditional_register_usage (void)
5048+{
5049+ int regno;
5050+
5051+ if (!TARGET_HARD_FLOAT)
5052+ {
5053+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5054+ fixed_regs[regno] = call_used_regs[regno] = 1;
5055+ }
5056+}
5057+
5058+/* Implement TARGET_TRAMPOLINE_INIT. */
5059+
5060+static void
5061+riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5062+{
5063+ rtx addr, end_addr, mem;
5064+ rtx trampoline[4];
5065+ unsigned int i;
5066+ HOST_WIDE_INT static_chain_offset, target_function_offset;
5067+
5068+ /* Work out the offsets of the pointers from the start of the
5069+ trampoline code. */
5070+ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
5071+ static_chain_offset = TRAMPOLINE_CODE_SIZE;
5072+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
5073+
5074+ /* Get pointers to the beginning and end of the code block. */
5075+ addr = force_reg (Pmode, XEXP (m_tramp, 0));
5076+ end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
5077+
5078+#define OP(X) gen_int_mode (X, SImode)
5079+#define MATCH_LREG ((Pmode) == DImode ? MATCH_LD : MATCH_LW)
5080+
5081+ /* auipc t0, 0
5082+ l[wd] t1, target_function_offset(t0)
5083+ l[wd] $static_chain, static_chain_offset(t0)
5084+ jr t1
5085+ */
5086+
5087+ trampoline[0] = OP (RISCV_UTYPE (AUIPC, STATIC_CHAIN_REGNUM, 0));
5088+ trampoline[1] = OP (RISCV_ITYPE (LREG, RISCV_PROLOGUE_TEMP_REGNUM,
5089+ STATIC_CHAIN_REGNUM, target_function_offset));
5090+ trampoline[2] = OP (RISCV_ITYPE (LREG, STATIC_CHAIN_REGNUM,
5091+ STATIC_CHAIN_REGNUM, static_chain_offset));
5092+ trampoline[3] = OP (RISCV_ITYPE (JALR, 0, RISCV_PROLOGUE_TEMP_REGNUM, 0));
5093+
5094+#undef MATCH_LREG
5095+#undef OP
5096+
5097+ /* Copy the trampoline code. Leave any padding uninitialized. */
5098+ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
5099+ {
5100+ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
5101+ riscv_emit_move (mem, trampoline[i]);
5102+ }
5103+
5104+ /* Set up the static chain pointer field. */
5105+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
5106+ riscv_emit_move (mem, chain_value);
5107+
5108+ /* Set up the target function field. */
5109+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
5110+ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
5111+
5112+ /* Flush the code part of the trampoline. */
5113+ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
5114+ emit_insn (gen_clear_cache (addr, end_addr));
5115+}
5116+
5117+static bool
5118+riscv_lra_p (void)
5119+{
5120+ return riscv_lra_flag;
5121+}
5122+
5123+/* Initialize the GCC target structure. */
5124+#undef TARGET_ASM_ALIGNED_HI_OP
5125+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
5126+#undef TARGET_ASM_ALIGNED_SI_OP
5127+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
5128+#undef TARGET_ASM_ALIGNED_DI_OP
5129+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
5130+
5131+#undef TARGET_OPTION_OVERRIDE
5132+#define TARGET_OPTION_OVERRIDE riscv_option_override
5133+
5134+#undef TARGET_LEGITIMIZE_ADDRESS
5135+#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
5136+
5137+#undef TARGET_SCHED_ADJUST_COST
5138+#define TARGET_SCHED_ADJUST_COST riscv_adjust_cost
5139+#undef TARGET_SCHED_ISSUE_RATE
5140+#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
5141+
5142+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
5143+#define TARGET_FUNCTION_OK_FOR_SIBCALL hook_bool_tree_tree_true
5144+
5145+#undef TARGET_REGISTER_MOVE_COST
5146+#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
5147+#undef TARGET_MEMORY_MOVE_COST
5148+#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
5149+#undef TARGET_RTX_COSTS
5150+#define TARGET_RTX_COSTS riscv_rtx_costs
5151+#undef TARGET_ADDRESS_COST
5152+#define TARGET_ADDRESS_COST riscv_address_cost
5153+
5154+#undef TARGET_PREFERRED_RELOAD_CLASS
5155+#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
5156+
5157+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
5158+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
5159+
5160+#undef TARGET_EXPAND_BUILTIN_VA_START
5161+#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
5162+
5163+#undef TARGET_PROMOTE_FUNCTION_MODE
5164+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
5165+
5166+#undef TARGET_RETURN_IN_MEMORY
5167+#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
5168+
5169+#undef TARGET_ASM_OUTPUT_MI_THUNK
5170+#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
5171+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
5172+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
5173+
5174+#undef TARGET_PRINT_OPERAND
5175+#define TARGET_PRINT_OPERAND riscv_print_operand
5176+#undef TARGET_PRINT_OPERAND_ADDRESS
5177+#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
5178+
5179+#undef TARGET_SETUP_INCOMING_VARARGS
5180+#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
5181+#undef TARGET_STRICT_ARGUMENT_NAMING
5182+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
5183+#undef TARGET_MUST_PASS_IN_STACK
5184+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
5185+#undef TARGET_PASS_BY_REFERENCE
5186+#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
5187+#undef TARGET_ARG_PARTIAL_BYTES
5188+#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
5189+#undef TARGET_FUNCTION_ARG
5190+#define TARGET_FUNCTION_ARG riscv_function_arg
5191+#undef TARGET_FUNCTION_ARG_ADVANCE
5192+#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
5193+#undef TARGET_FUNCTION_ARG_BOUNDARY
5194+#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
5195+
5196+#undef TARGET_MODE_REP_EXTENDED
5197+#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
5198+
5199+#undef TARGET_SCALAR_MODE_SUPPORTED_P
5200+#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
5201+
5202+#undef TARGET_INIT_BUILTINS
5203+#define TARGET_INIT_BUILTINS riscv_init_builtins
5204+#undef TARGET_BUILTIN_DECL
5205+#define TARGET_BUILTIN_DECL riscv_builtin_decl
5206+#undef TARGET_EXPAND_BUILTIN
5207+#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
5208+
5209+#undef TARGET_HAVE_TLS
5210+#define TARGET_HAVE_TLS HAVE_AS_TLS
5211+
5212+#undef TARGET_CANNOT_FORCE_CONST_MEM
5213+#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
5214+
5215+#undef TARGET_LEGITIMATE_CONSTANT_P
5216+#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
5217+
5218+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
5219+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
5220+
5221+#ifdef HAVE_AS_DTPRELWORD
5222+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
5223+#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
5224+#endif
5225+
5226+#undef TARGET_LEGITIMATE_ADDRESS_P
5227+#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
5228+
5229+#undef TARGET_CAN_ELIMINATE
5230+#define TARGET_CAN_ELIMINATE riscv_can_eliminate
5231+
5232+#undef TARGET_CONDITIONAL_REGISTER_USAGE
5233+#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
5234+
5235+#undef TARGET_TRAMPOLINE_INIT
5236+#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
5237+
5238+#undef TARGET_IN_SMALL_DATA_P
5239+#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
5240+
5241+#undef TARGET_ASM_SELECT_RTX_SECTION
5242+#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
5243+
5244+#undef TARGET_MIN_ANCHOR_OFFSET
5245+#define TARGET_MIN_ANCHOR_OFFSET (-RISCV_IMM_REACH/2)
5246+
5247+#undef TARGET_MAX_ANCHOR_OFFSET
5248+#define TARGET_MAX_ANCHOR_OFFSET (RISCV_IMM_REACH/2-1)
5249+
5250+#undef TARGET_LRA_P
5251+#define TARGET_LRA_P riscv_lra_p
5252+
5253+struct gcc_target targetm = TARGET_INITIALIZER;
5254+
5255+#include "gt-riscv.h"
5256diff -urN original-gcc/gcc/config/riscv/riscv-ftypes.def gcc/gcc/config/riscv/riscv-ftypes.def
5257--- original-gcc/gcc/config/riscv/riscv-ftypes.def 1970-01-01 01:00:00.000000000 +0100
5258+++ gcc-4.9.2/gcc/config/riscv/riscv-ftypes.def 2015-03-07 09:51:45.663139025 +0100
5259@@ -0,0 +1,39 @@
5260+/* Definitions of prototypes for RISC-V built-in functions.
5261+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
5262+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
5263+ Based on MIPS target for GNU compiler.
5264+
5265+This file is part of GCC.
5266+
5267+GCC is free software; you can redistribute it and/or modify
5268+it under the terms of the GNU General Public License as published by
5269+the Free Software Foundation; either version 3, or (at your option)
5270+any later version.
5271+
5272+GCC is distributed in the hope that it will be useful,
5273+but WITHOUT ANY WARRANTY; without even the implied warranty of
5274+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5275+GNU General Public License for more details.
5276+
5277+You should have received a copy of the GNU General Public License
5278+along with GCC; see the file COPYING3. If not see
5279+<http://www.gnu.org/licenses/>. */
5280+
5281+/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
5282+ RISC-V built-in functions, where:

5283+
5284+ NARGS is the number of arguments.
5285+ LIST contains the return-type code followed by the codes for each
5286+ argument type.
5287+
5288+ Argument- and return-type codes are either modes or one of the following:
5289+
5290+ VOID for void_type_node
5291+ INT for integer_type_node
5292+ POINTER for ptr_type_node
5293+
5294+ (we don't use PTR because that's an ANSI-compatibility macro).
5295+
5296+ Please keep this list lexicographically sorted by the LIST argument. */
5297+
5298+DEF_RISCV_FTYPE (1, (VOID, VOID))
5299diff -urN original-gcc/gcc/config/riscv/riscv.h gcc/gcc/config/riscv/riscv.h
5300--- original-gcc/gcc/config/riscv/riscv.h 1970-01-01 01:00:00.000000000 +0100
5301+++ gcc-4.9.2/gcc/config/riscv/riscv.h 2015-03-07 09:51:45.667139025 +0100
5302@@ -0,0 +1,1127 @@
5303+/* Definition of RISC-V target for GNU compiler.
5304+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
5305+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
5306+ Based on MIPS target for GNU compiler.
5307+
5308+This file is part of GCC.
5309+
5310+GCC is free software; you can redistribute it and/or modify
5311+it under the terms of the GNU General Public License as published by
5312+the Free Software Foundation; either version 3, or (at your option)
5313+any later version.
5314+
5315+GCC is distributed in the hope that it will be useful,
5316+but WITHOUT ANY WARRANTY; without even the implied warranty of
5317+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5318+GNU General Public License for more details.
5319+
5320+You should have received a copy of the GNU General Public License
5321+along with GCC; see the file COPYING3. If not see
5322+<http://www.gnu.org/licenses/>. */
5323+
5324+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
5325+ directly accessible, while the command-line options select
5326+ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
5327+ in use. */
5328+#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI
5329+#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI
5330+
5331+/* Target CPU builtins. */
5332+#define TARGET_CPU_CPP_BUILTINS() \
5333+ do \
5334+ { \
5335+ builtin_assert ("machine=riscv"); \
5336+ \
5337+ builtin_assert ("cpu=riscv"); \
5338+ builtin_define ("__riscv__"); \
5339+ builtin_define ("__riscv"); \
5340+ builtin_define ("_riscv"); \
5341+ \
5342+ if (TARGET_64BIT) \
5343+ { \
5344+ builtin_define ("__riscv64"); \
5345+ builtin_define ("_RISCV_SIM=_ABI64"); \
5346+ } \
5347+ else \
5348+ builtin_define ("_RISCV_SIM=_ABI32"); \
5349+ \
5350+ builtin_define ("_ABI32=1"); \
5351+ builtin_define ("_ABI64=3"); \
5352+ \
5353+ \
5354+ builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE); \
5355+ builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE); \
5356+ builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE); \
5357+ builtin_define_with_int_value ("_RISCV_FPSET", 32); \
5358+ \
5359+ if (TARGET_ATOMIC) { \
5360+ builtin_define ("__riscv_atomic"); \
5361+ } \
5362+ \
5363+ /* These defines reflect the ABI in use, not whether the \
5364+ FPU is directly accessible. */ \
5365+ if (TARGET_HARD_FLOAT_ABI) { \
5366+ builtin_define ("__riscv_hard_float"); \
5367+ if (TARGET_FDIV) { \
5368+ builtin_define ("__riscv_fdiv"); \
5369+ builtin_define ("__riscv_fsqrt"); \
5370+ } \
5371+ } else \
5372+ builtin_define ("__riscv_soft_float"); \
5373+ \
5374+ /* The base RISC-V ISA is always little-endian. */ \
5375+ builtin_define_std ("RISCVEL"); \
5376+ builtin_define ("_RISCVEL"); \
5377+ \
5378+ /* Macros dependent on the C dialect. */ \
5379+ if (preprocessing_asm_p ()) \
5380+ { \
5381+ builtin_define_std ("LANGUAGE_ASSEMBLY"); \
5382+ builtin_define ("_LANGUAGE_ASSEMBLY"); \
5383+ } \
5384+ else if (c_dialect_cxx ()) \
5385+ { \
5386+ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); \
5387+ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
5388+ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
5389+ } \
5390+ else \
5391+ { \
5392+ builtin_define_std ("LANGUAGE_C"); \
5393+ builtin_define ("_LANGUAGE_C"); \
5394+ } \
5395+ if (c_dialect_objc ()) \
5396+ { \
5397+ builtin_define ("_LANGUAGE_OBJECTIVE_C"); \
5398+ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
5399+ /* Bizarre, but needed at least for Irix. */ \
5400+ builtin_define_std ("LANGUAGE_C"); \
5401+ builtin_define ("_LANGUAGE_C"); \
5402+ } \
5403+ } \
5404+ while (0)
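/* Illustrative note: for a typical RV64 IMAFD hard-float compile, the
   block above predefines (roughly) __riscv, __riscv__, _riscv, __riscv64,
   _RISCV_SIM=_ABI64, _ABI32=1, _ABI64=3, _RISCV_SZINT=32, _RISCV_SZLONG=64,
   _RISCV_SZPTR=64, _RISCV_FPSET=32, __riscv_atomic, __riscv_hard_float,
   and (when FDIV is enabled) __riscv_fdiv and __riscv_fsqrt, plus the
   RISCVEL/_RISCVEL endianness macros.  A soft-float ABI build gets
   __riscv_soft_float instead of __riscv_hard_float. */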
5405+
5406+/* Default target_flags if no switches are specified */
5407+
5408+#ifndef TARGET_DEFAULT
5409+#define TARGET_DEFAULT (TARGET_ATOMIC |
5410+#endif
5411+
5412+#ifndef RISCV_ARCH_STRING_DEFAULT
5413+#define RISCV_ARCH_STRING_DEFAULT "IMAFD"
5414+#endif
5415+
5416+#ifndef RISCV_TUNE_STRING_DEFAULT
5417+#define RISCV_TUNE_STRING_DEFAULT "rocket"
5418+#endif
5419+
5420+#ifndef TARGET_64BIT_DEFAULT
5421+#define TARGET_64BIT_DEFAULT 1
5422+#endif
5423+
5424+#if TARGET_64BIT_DEFAULT
5425+# define MULTILIB_ARCH_DEFAULT "m64"
5426+# define OPT_ARCH64 "!m32"
5427+# define OPT_ARCH32 "m32"
5428+#else
5429+# define MULTILIB_ARCH_DEFAULT "m32"
5430+# define OPT_ARCH64 "m64"
5431+# define OPT_ARCH32 "!m64"
5432+#endif
5433+
5434+#ifndef MULTILIB_DEFAULTS
5435+#define MULTILIB_DEFAULTS \
5436+ { MULTILIB_ARCH_DEFAULT }
5437+#endif
5438+
5439+
5440+/* Support for a compile-time default CPU, et cetera. The rules are:
5441+ --with-arch is ignored if -march is specified.
5442+ --with-tune is ignored if -mtune is specified.
5443+ --with-float is ignored if -mhard-float or -msoft-float are specified. */
5444+#define OPTION_DEFAULT_SPECS \
5445+ {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
5446+ {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
5447+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
5448+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
5449+
5450+#define DRIVER_SELF_SPECS ""
5451+
5452+#ifdef IN_LIBGCC2
5453+#undef TARGET_64BIT
5454+/* Make this compile time constant for libgcc2 */
5455+#ifdef __riscv64
5456+#define TARGET_64BIT 1
5457+#else
5458+#define TARGET_64BIT 0
5459+#endif
5460+#endif /* IN_LIBGCC2 */
5461+
5462+/* Tell collect what flags to pass to nm. */
5463+#ifndef NM_FLAGS
5464+#define NM_FLAGS "-Bn"
5465+#endif
5466+
5467+#undef ASM_SPEC
5468+#define ASM_SPEC "\
5469+%(subtarget_asm_debugging_spec) \
5470+%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
5471+%{fPIC|fpic|fPIE|fpie:-fpic} \
5472+%{march=*} \
5473+%(subtarget_asm_spec)"
5474+
5475+/* Extra switches sometimes passed to the linker. */
5476+
5477+#ifndef LINK_SPEC
5478+#define LINK_SPEC "\
5479+%{!T:-dT riscv.ld} \
5480+%{m64:-melf64lriscv} \
5481+%{m32:-melf32lriscv} \
5482+%{shared}"
5483+#endif /* LINK_SPEC defined */
5484+
5485+/* This macro defines names of additional specifications to put in the specs
5486+ that can be used in various specifications like CC1_SPEC. Its definition
5487+ is an initializer with a subgrouping for each command option.
5488+
5489+ Each subgrouping contains a string constant that defines the
5490+ specification name, and a string constant that is used by the GCC driver
5491+ program.
5492+
5493+ Do not define this macro if it does not need to do anything. */
5494+
5495+#define EXTRA_SPECS \
5496+ { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT }, \
5497+ SUBTARGET_EXTRA_SPECS
5498+
5499+#ifndef SUBTARGET_EXTRA_SPECS
5500+#define SUBTARGET_EXTRA_SPECS
5501+#endif
5502+
5503+/* By default, turn on GDB extensions. */
5504+#define DEFAULT_GDB_EXTENSIONS 1
5505+
5506+#define LOCAL_LABEL_PREFIX "."
5507+#define USER_LABEL_PREFIX ""
5508+
5509+#define DWARF2_DEBUGGING_INFO 1
5510+#define DWARF2_ASM_LINE_DEBUG_INFO 0
5511+
5512+/* The mapping from gcc register number to DWARF 2 CFA column number. */
5513+#define DWARF_FRAME_REGNUM(REGNO) \
5514+ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
5515+
5516+/* The DWARF 2 CFA column which tracks the return address. */
5517+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
5518+
5519+/* Don't emit .cfi_sections, as it does not work */
5520+#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
5521+#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
5522+
5523+/* Before the prologue, the return address lives in ra (x1). */
5524+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
5525+
5526+/* Describe how we implement __builtin_eh_return. */
5527+#define EH_RETURN_DATA_REGNO(N) \
5528+ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
5529+
5530+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
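/* Illustrative note: with GP_ARG_FIRST == 10 (see below), the EH data
   registers are a0-a3 (x10-x13) and the __builtin_eh_return stack
   adjustment is passed in a4 (x14). */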
5531+
5532+/* Target machine storage layout */
5533+
5534+#define BITS_BIG_ENDIAN 0
5535+#define BYTES_BIG_ENDIAN 0
5536+#define WORDS_BIG_ENDIAN 0
5537+
5538+#define MAX_BITS_PER_WORD 64
5539+
5540+/* Width of a word, in units (bytes). */
5541+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
5542+#ifndef IN_LIBGCC2
5543+#define MIN_UNITS_PER_WORD 4
5544+#endif
5545+
5546+/* We currently require both or neither of the `F' and `D' extensions. */
5547+#define UNITS_PER_FPREG 8
5548+
5549+/* If FP regs aren't wide enough for a given FP argument, it is passed in
5550+ integer registers. */
5551+#define MIN_FPRS_PER_FMT 1
5552+
5553+/* The largest size of value that can be held in floating-point
5554+ registers and moved with a single instruction. */
5555+#define UNITS_PER_HWFPVALUE \
5556+ (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG)
5557+
5558+/* The largest size of value that can be held in floating-point
5559+ registers. */
5560+#define UNITS_PER_FPVALUE \
5561+ (TARGET_SOFT_FLOAT_ABI ? 0 \
5562+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
5563+
5564+/* The number of bytes in a double. */
5565+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
5566+
5567+/* Set the sizes of the core types. */
5568+#define SHORT_TYPE_SIZE 16
5569+#define INT_TYPE_SIZE 32
5570+#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
5571+#define LONG_LONG_TYPE_SIZE 64
5572+
5573+#define FLOAT_TYPE_SIZE 32
5574+#define DOUBLE_TYPE_SIZE 64
5575+/* XXX The ABI says long doubles are IEEE-754-2008 float128s. */
5576+#define LONG_DOUBLE_TYPE_SIZE 64
5577+
5578+#ifdef IN_LIBGCC2
5579+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
5580+#endif
5581+
5582+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
5583+#define PARM_BOUNDARY BITS_PER_WORD
5584+
5585+/* Allocation boundary (in *bits*) for the code of a function. */
5586+#define FUNCTION_BOUNDARY 32
5587+
5588+/* There is no point aligning anything to a rounder boundary than this. */
5589+#define BIGGEST_ALIGNMENT 128
5590+
5591+/* All accesses must be aligned. */
5592+#define STRICT_ALIGNMENT 1
5593+
5594+/* Define this if you wish to imitate the way many other C compilers
5595+ handle alignment of bitfields and the structures that contain
5596+ them.
5597+
5598+ The behavior is that the type written for a bit-field (`int',
5599+ `short', or other integer type) imposes an alignment for the
5600+ entire structure, as if the structure really did contain an
5601+ ordinary field of that type. In addition, the bit-field is placed
5602+ within the structure so that it would fit within such a field,
5603+ not crossing a boundary for it.
5604+
5605+ Thus, on most machines, a bit-field whose type is written as `int'
5606+ would not cross a four-byte boundary, and would force four-byte
5607+ alignment for the whole structure. (The alignment used may not
5608+ be four bytes; it is controlled by the other alignment
5609+ parameters.)
5610+
5611+ If the macro is defined, its definition should be a C expression;
5612+ a nonzero value for the expression enables this behavior. */
5613+
5614+#define PCC_BITFIELD_TYPE_MATTERS 1
5615+
5616+/* If defined, a C expression to compute the alignment given to a
5617+ constant that is being placed in memory. CONSTANT is the constant
5618+ and ALIGN is the alignment that the object would ordinarily have.
5619+ The value of this macro is used instead of that alignment to align
5620+ the object.
5621+
5622+ If this macro is not defined, then ALIGN is used.
5623+
5624+ The typical use of this macro is to increase alignment for string
5625+ constants to be word aligned so that `strcpy' calls that copy
5626+ constants can be done inline. */
5627+
5628+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
5629+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
5630+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
5631+
5632+/* If defined, a C expression to compute the alignment for a static
5633+ variable. TYPE is the data type, and ALIGN is the alignment that
5634+ the object would ordinarily have. The value of this macro is used
5635+ instead of that alignment to align the object.
5636+
5637+ If this macro is not defined, then ALIGN is used.
5638+
5639+ One use of this macro is to increase alignment of medium-size
5640+ data to make it all fit in fewer cache lines. Another is to
5641+ cause character arrays to be word-aligned so that `strcpy' calls
5642+ that copy constants to character arrays can be done inline. */
5643+
5644+#undef DATA_ALIGNMENT
5645+#define DATA_ALIGNMENT(TYPE, ALIGN) \
5646+ ((((ALIGN) < BITS_PER_WORD) \
5647+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
5648+ || TREE_CODE (TYPE) == UNION_TYPE \
5649+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
5650+
5651+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
5652+ character arrays to be word-aligned so that `strcpy' calls that copy
5653+ constants to character arrays can be done inline, and 'strcmp' can be
5654+ optimised to use word loads. */
5655+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
5656+ DATA_ALIGNMENT (TYPE, ALIGN)
5657+
5658+/* Define if operations between registers always perform the operation
5659+ on the full register even if a narrower mode is specified. */
5660+#define WORD_REGISTER_OPERATIONS
5661+
5662+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
5663+ moves. All other references are zero extended. */
5664+#define LOAD_EXTEND_OP(MODE) \
5665+ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
5666+ ? SIGN_EXTEND : ZERO_EXTEND)
5667+
5668+/* Define this macro if it is advisable to hold scalars in registers
5669+ in a wider mode than that declared by the program. In such cases,
5670+ the value is constrained to be within the bounds of the declared
5671+ type, but kept valid in the wider mode. The signedness of the
5672+ extension may differ from that of the type. */
5673+
5674+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
5675+ if (GET_MODE_CLASS (MODE) == MODE_INT \
5676+ && GET_MODE_SIZE (MODE) < 4) \
5677+ { \
5678+ (MODE) = Pmode; \
5679+ }
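/* Illustrative note: only modes narrower than 4 bytes are promoted, so a
   QImode or HImode scalar is held in a Pmode register (SImode on RV32,
   DImode on RV64), while SImode values are left alone even on RV64. */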
5680+
5681+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
5682+ Extensions of pointers to word_mode must be signed. */
5683+#define POINTERS_EXTEND_UNSIGNED false
5684+
5685+/* RV32 double-precision FP <-> integer moves go through memory */
5686+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
5687+ (!TARGET_64BIT && GET_MODE_SIZE (MODE) == 8 && \
5688+ (((CLASS1) == FP_REGS && (CLASS2) != FP_REGS) \
5689+ || ((CLASS2) == FP_REGS && (CLASS1) != FP_REGS)))
5690+
5691+/* Define if loading short immediate values into registers sign extends. */
5692+#define SHORT_IMMEDIATES_SIGN_EXTEND
5693+
5694+/* Standard register usage. */
5695+
5696+/* Number of hardware registers. We have:
5697+
5698+ - 32 integer registers
5699+ - 32 floating point registers
5700+ - 32 vector integer registers
5701+ - 32 vector floating point registers
5702+ - 2 fake registers:
5703+ - ARG_POINTER_REGNUM
5704+ - FRAME_POINTER_REGNUM */
5705+
5706+#define FIRST_PSEUDO_REGISTER 66
5707+
5708+/* x0, sp, gp, and tp are fixed. */
5709+
5710+#define FIXED_REGISTERS \
5711+{ /* General registers. */ \
5712+ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
5713+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
5714+ /* Floating-point registers. */ \
5715+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
5716+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
5717+ /* Others. */ \
5718+ 1, 1 \
5719+}
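/* Illustrative note: in these register arrays, indices 0-31 are x0-x31,
   indices 32-63 are f0-f31, and indices 64/65 are the fake argument and
   frame pointers (see REGISTER_NAMES below).  The entries set above are
   therefore x0 (zero), x2 (sp), x3 (gp), x4 (tp) and the two fake
   registers. */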
5720+
5721+
5722+/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
5723+ The call RTLs themselves clobber ra. */
5724+
5725+#define CALL_USED_REGISTERS \
5726+{ /* General registers. */ \
5727+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
5728+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
5729+ /* Floating-point registers. */ \
5730+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
5731+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
5732+ /* Others. */ \
5733+ 1, 1 \
5734+}
5735+
5736+#define CALL_REALLY_USED_REGISTERS \
5737+{ /* General registers. */ \
5738+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
5739+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
5740+ /* Floating-point registers. */ \
5741+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
5742+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
5743+ /* Others. */ \
5744+ 1, 1 \
5745+}
5746+
5747+/* Internal macros to classify an ISA register's type. */
5748+
5749+#define GP_REG_FIRST 0
5750+#define GP_REG_LAST 31
5751+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
5752+
5753+#define FP_REG_FIRST 32
5754+#define FP_REG_LAST 63
5755+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
5756+
5757+/* The DWARF 2 CFA column which tracks the return address from a
5758+ signal handler context. This means that to maintain backwards
5759+ compatibility, no hard register can be assigned this column if it
5760+ would need to be handled by the DWARF unwinder. */
5761+#define DWARF_ALT_FRAME_RETURN_COLUMN 64
5762+
5763+#define GP_REG_P(REGNO) \
5764+ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
5765+#define FP_REG_P(REGNO) \
5766+ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
5767+
5768+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
5769+
5770+/* Return coprocessor number from register number. */
5771+
5772+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
5773+ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
5774+ : COP3_REG_P (REGNO) ? '3' : '?')
5775+
5776+
5777+#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
5778+
5779+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
5780+ riscv_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
5781+
5782+#define MODES_TIEABLE_P(MODE1, MODE2) \
5783+ ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT \
5784+ && GET_MODE_CLASS (MODE2) == MODE_INT))
5785+
5786+/* Use s0 as the frame pointer if it is so requested. */
5787+#define HARD_FRAME_POINTER_REGNUM 8
5788+#define STACK_POINTER_REGNUM 2
5789+#define THREAD_POINTER_REGNUM 4
5790+
5791+/* These two registers don't really exist: they get eliminated to either
5792+ the stack or hard frame pointer. */
5793+#define ARG_POINTER_REGNUM 64
5794+#define FRAME_POINTER_REGNUM 65
5795+
5796+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
5797+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
5798+
5799+/* Register in which static-chain is passed to a function. */
5800+#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
5801+
5802+/* Registers used as temporaries in prologue/epilogue code.
5803+
5804+ The prologue registers mustn't conflict with any
5805+ incoming arguments, the static chain pointer, or the frame pointer.
5806+ The epilogue temporary mustn't conflict with the return registers,
5807+ the frame pointer, the EH stack adjustment, or the EH data registers. */
5808+
5809+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
5810+#define RISCV_EPILOGUE_TEMP_REGNUM RISCV_PROLOGUE_TEMP_REGNUM
5811+
5812+#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
5813+#define RISCV_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_EPILOGUE_TEMP_REGNUM)
5814+
5815+#define FUNCTION_PROFILER(STREAM, LABELNO) \
5816+{ \
5817+ sorry ("profiler support for RISC-V"); \
5818+}
5819+
5820+/* Define this macro if it is as good or better to call a constant
5821+ function address than to call an address kept in a register. */
5822+#define NO_FUNCTION_CSE 1
5823+
5824+/* Define the classes of registers for register constraints in the
5825+ machine description. Also define ranges of constants.
5826+
5827+ One of the classes must always be named ALL_REGS and include all hard regs.
5828+ If there is more than one class, another class must be named NO_REGS
5829+ and contain no registers.
5830+
5831+ The name GENERAL_REGS must be the name of a class (or an alias for
5832+ another name such as ALL_REGS). This is the class of registers
5833+ that is allowed by "g" or "r" in a register constraint.
5834+ Also, registers outside this class are allocated only when
5835+ instructions express preferences for them.
5836+
5837+ The classes must be numbered in nondecreasing order; that is,
5838+ a larger-numbered class must never be contained completely
5839+ in a smaller-numbered class.
5840+
5841+ For any two classes, it is very desirable that there be another
5842+ class that represents their union. */
5843+
5844+enum reg_class
5845+{
5846+ NO_REGS, /* no registers in set */
5847+ T_REGS, /* registers used by indirect sibcalls */
5848+ GR_REGS, /* integer registers */
5849+ FP_REGS, /* floating point registers */
5850+ FRAME_REGS, /* $arg and $frame */
5851+ ALL_REGS, /* all registers */
5852+ LIM_REG_CLASSES /* max value + 1 */
5853+};
5854+
5855+#define N_REG_CLASSES (int) LIM_REG_CLASSES
5856+
5857+#define GENERAL_REGS GR_REGS
5858+
5859+/* An initializer containing the names of the register classes as C
5860+ string constants. These names are used in writing some of the
5861+ debugging dumps. */
5862+
5863+#define REG_CLASS_NAMES \
5864+{ \
5865+ "NO_REGS", \
5866+ "T_REGS", \
5867+ "GR_REGS", \
5868+ "FP_REGS", \
5869+ "FRAME_REGS", \
5870+ "ALL_REGS" \
5871+}
5872+
5873+/* An initializer containing the contents of the register classes,
5874+ as integers which are bit masks. The Nth integer specifies the
5875+ contents of class N. The way the integer MASK is interpreted is
5876+ that register R is in the class if `MASK & (1 << R)' is 1.
5877+
5878+ When the machine has more than 32 registers, an integer does not
5879+ suffice. Then the integers are replaced by sub-initializers,
5880+ braced groupings containing several integers. Each
5881+ sub-initializer must be suitable as an initializer for the type
5882+ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
5883+
5884+#define REG_CLASS_CONTENTS \
5885+{ \
5886+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
5887+ { 0xf00000e0, 0x00000000, 0x00000000 }, /* T_REGS */ \
5888+ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
5889+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
5890+ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
5891+ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
5892+}
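/* Illustrative note: each mask word covers 32 registers, with bit 0 being
   the lowest-numbered register in that word.  For example, T_REGS is
   0xf00000e0 in the GPR word:
     0x000000e0 -> bits 5-7   -> t0-t2 (x5-x7)
     0xf0000000 -> bits 28-31 -> t3-t6 (x28-x31)
   i.e. the temporaries available for indirect sibcalls. */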
5893+
5894+/* A C expression whose value is a register class containing hard
5895+ register REGNO. In general there is more than one such class;
5896+ choose a class which is "minimal", meaning that no smaller class
5897+ also contains the register. */
5898+
5899+#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
5900+
5901+/* A macro whose definition is the name of the class to which a
5902+ valid base register must belong. A base register is one used in
5903+ an address which is the register value plus a displacement. */
5904+
5905+#define BASE_REG_CLASS GR_REGS
5906+
5907+/* A macro whose definition is the name of the class to which a
5908+ valid index register must belong. An index register is one used
5909+ in an address where its value is either multiplied by a scale
5910+ factor or added to another register (as well as added to a
5911+ displacement). */
5912+
5913+#define INDEX_REG_CLASS NO_REGS
5914+
5915+/* We generally want to put call-clobbered registers ahead of
5916+ call-saved ones. (IRA expects this.) */
5917+
5918+#define REG_ALLOC_ORDER \
5919+{ \
5920+ /* Call-clobbered GPRs. */ \
5921+ 15, 14, 13, 12, 11, 10, 16, 17, 5, 6, 7, 28, 29, 30, 31, 1, \
5922+ /* Call-saved GPRs. */ \
5923+ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
5924+ /* GPRs that can never be exposed to the register allocator. */ \
5925+ 0, 2, 3, 4, \
5926+ /* Call-clobbered FPRs. */ \
5927+ 32, 33, 34, 35, 36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49, \
5928+ 60, 61, 62, 63, \
5929+ /* Call-saved FPRs. */ \
5930+ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
5931+ /* None of the remaining classes have defined call-saved \
5932+ registers. */ \
5933+ 64, 65 \
5934+}
5935+
5936+/* True if VALUE fits in a signed 12-bit (I-type) immediate. */
5937+
5938+#include "opcode-riscv.h"
5939+#define SMALL_OPERAND(VALUE) \
5940+ ((unsigned HOST_WIDE_INT) (VALUE) + RISCV_IMM_REACH/2 < RISCV_IMM_REACH)
5941+
5942+/* True if VALUE can be loaded into a register using LUI. */
5943+
5944+#define LUI_OPERAND(VALUE) \
5945+ (((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) == ((1UL<<31) - RISCV_IMM_REACH) \
5946+ || ((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) + RISCV_IMM_REACH == 0)
5947+
5948+/* Likewise, for a CONST_INT rtx X: true if INTVAL (X) satisfies the
5949+ corresponding immediate-operand test. */
5950+
5951+#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X))
5952+#define LUI_INT(X) LUI_OPERAND (INTVAL (X))
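/* Worked example (assuming RISCV_IMM_REACH == 1 << 12, the I-type
   immediate range): SMALL_OPERAND (VALUE) is an unsigned-overflow test
   for -2048 <= VALUE < 2048, so 2047 and -2048 pass while 2048 fails.
   LUI_OPERAND (VALUE) requires the low 12 bits to be zero and the rest
   to fit the sign-extended 20-bit LUI immediate, so 0x12345000 passes
   and 0x12345678 fails. */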
5953+
5954+/* Let riscv_secondary_reload_class decide whether a reload of class
5955+ CLASS in mode MODE needs an intermediate register, for both input
5956+ and output reloads. */
5957+
5958+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
5959+ riscv_secondary_reload_class (CLASS, MODE, X, true)
5960+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
5961+ riscv_secondary_reload_class (CLASS, MODE, X, false)
5962+
5963+/* Return the maximum number of consecutive registers
5964+ needed to represent mode MODE in a register of class CLASS. */
5965+
5966+#define CLASS_MAX_NREGS(CLASS, MODE) riscv_class_max_nregs (CLASS, MODE)
5967+
5968+/* It is undefined to interpret an FP register in a format different from
5969+ the one it was created with. */
5970+
5971+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
5972+ reg_classes_intersect_p (FP_REGS, CLASS)
5973+
5974+/* Stack layout; function entry, exit and calling. */
5975+
5976+#define STACK_GROWS_DOWNWARD
5977+
5978+#define FRAME_GROWS_DOWNWARD 1
5979+
5980+#define STARTING_FRAME_OFFSET 0
5981+
5982+#define RETURN_ADDR_RTX riscv_return_addr
5983+
5984+#define ELIMINABLE_REGS \
5985+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
5986+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
5987+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
5988+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
5989+
5990+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
5991+ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
5992+
5993+/* Allocate stack space for arguments at the beginning of each function. */
5994+#define ACCUMULATE_OUTGOING_ARGS 1
5995+
5996+/* The argument pointer always points to the first argument. */
5997+#define FIRST_PARM_OFFSET(FNDECL) 0
5998+
5999+#define REG_PARM_STACK_SPACE(FNDECL) 0
6000+
6001+/* Define this if it is the responsibility of the caller to
6002+ allocate the area reserved for arguments passed in registers.
6003+ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
6004+ of this macro is to determine whether the space is included in
6005+ `crtl->outgoing_args_size'. */
6006+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
6007+
6008+#define STACK_BOUNDARY 128
6009+
6010+/* Symbolic macros for the registers used to return integer and floating
6011+ point values. */
6012+
6013+#define GP_RETURN GP_ARG_FIRST
6014+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : FP_ARG_FIRST)
6015+
6016+#define MAX_ARGS_IN_REGISTERS 8
6017+
6018+/* Symbolic macros for the first/last argument registers. */
6019+
6020+#define GP_ARG_FIRST (GP_REG_FIRST + 10)
6021+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
6022+#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
6023+#define FP_ARG_FIRST (FP_REG_FIRST + 10)
6024+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
6025+
6026+#define LIBCALL_VALUE(MODE) \
6027+ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
6028+
6029+#define FUNCTION_VALUE(VALTYPE, FUNC) \
6030+ riscv_function_value (VALTYPE, FUNC, VOIDmode)
6031+
6032+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
6033+
6034+/* 1 if N is a possible register number for function argument passing.
6035+ We have no FP argument registers when soft-float. */
6037+
6038+/* Accept arguments in a0-a7 and/or fa0-fa7. */
6039+#define FUNCTION_ARG_REGNO_P(N) \
6040+ (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
6041+ || IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))
6042+
6043+/* The ABI views the arguments as a structure, of which the first 8
6044+ words go in registers and the rest go on the stack. If I < 8, N, the Ith
6045+ word might go in the Ith integer argument register or the Ith
6046+ floating-point argument register. */
6047+
6048+typedef struct {
6049+ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
6050+ unsigned int num_gprs;
6051+
6052+ /* Number of words passed on the stack. */
6053+ unsigned int stack_words;
6054+} CUMULATIVE_ARGS;
6055+
6056+/* Initialize a variable CUM of type CUMULATIVE_ARGS
6057+ for a call to a function whose data type is FNTYPE.
6058+ For a library call, FNTYPE is 0. */
6059+
6060+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
6061+ memset (&(CUM), 0, sizeof (CUM))
6062+
6063+#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
6064+
6065+/* The ABI requires 16-byte stack alignment, even on RV32. */
6066+#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
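/* Worked example: RISCV_STACK_ALIGN rounds up to the next multiple of 16,
   e.g. RISCV_STACK_ALIGN (1) == 16, RISCV_STACK_ALIGN (20) == 32 and
   RISCV_STACK_ALIGN (32) == 32. */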
6067+
6068+#define NO_PROFILE_COUNTERS 1
6069+
6070+/* Define this macro if the code for function profiling should come
6071+ before the function prologue. Normally, the profiling code comes
6072+ after. */
6073+
6074+/* #define PROFILE_BEFORE_PROLOGUE */
6075+
6076+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
6077+ the stack pointer does not matter. The value is tested only in
6078+ functions that have frame pointers.
6079+ No definition is equivalent to always zero. */
6080+
6081+#define EXIT_IGNORE_STACK 1
6082+
6083+
6084+/* Trampolines are a block of code followed by two pointers. */
6085+
6086+#define TRAMPOLINE_CODE_SIZE 16
6087+#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
6088+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
6089+
6090+/* Addressing modes, and classification of registers for them. */
6091+
6092+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
6093+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
6094+ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
6095+
6096+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
6097+ and check its validity for a certain class.
6098+ We have two alternate definitions for each of them.
6099+ The usual definition accepts all pseudo regs; the other rejects them all.
6100+ The symbol REG_OK_STRICT causes the latter definition to be used.
6101+
6102+ Most source files want to accept pseudo regs in the hope that
6103+ they will get allocated to the class that the insn wants them to be in.
6104+ Some source files that are used after register allocation
6105+ need to be strict. */
6106+
6107+#ifndef REG_OK_STRICT
6108+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
6109+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
6110+#else
6111+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
6112+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
6113+#endif
6114+
6115+#define REG_OK_FOR_INDEX_P(X) 0
6116+
6117+
6118+/* Maximum number of registers that can appear in a valid memory address. */
6119+
6120+#define MAX_REGS_PER_ADDRESS 1
6121+
6122+#define CONSTANT_ADDRESS_P(X) \
6123+ (CONSTANT_P (X) && memory_address_p (SImode, X))
6124+
6125+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
6126+ 'the start of the function that this code is output in'. */
6127+
6128+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
6129+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
6130+ asm_fprintf ((FILE), "%U%s", \
6131+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
6132+ else \
6133+ asm_fprintf ((FILE), "%U%s", (NAME))
6134+
6135+/* This flag marks functions that cannot be lazily bound. */
6136+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
6137+#define SYMBOL_REF_BIND_NOW_P(RTX) \
6138+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
6139+
6140+#define JUMP_TABLES_IN_TEXT_SECTION 0
6141+#define CASE_VECTOR_MODE SImode
6142+
6143+/* Define this as 1 if `char' should by default be signed; else as 0. */
6144+#define DEFAULT_SIGNED_CHAR 0
6145+
6146+/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
6147+#define MOVE_MAX UNITS_PER_WORD
6148+#define MAX_MOVE_MAX 8
6149+
6150+#define SLOW_BYTE_ACCESS 0
6151+
6152+#define SHIFT_COUNT_TRUNCATED 1
6153+
6154+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
6155+ is done just by pretending it is already truncated. */
6156+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
6157+ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
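/* Worked example: on RV64, truncating DImode (64 bits) to SImode (32 bits)
   is not a no-op (INPREC > 32 and OUTPREC is not < 32), so an explicit
   extension is emitted; SImode to HImode is a no-op since OUTPREC < 32.
   On RV32 every truncation is a no-op. */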
6158+
6159+/* Specify the machine mode that pointers have.
6160+ After generation of rtl, the compiler makes no further distinction
6161+ between pointers and any other objects of this machine mode. */
6162+
6163+#ifndef Pmode
6164+#define Pmode (TARGET_64BIT ? DImode : SImode)
6165+#endif
6166+
6167+/* Give call MEMs SImode since it is the "most permissive" mode
6168+ for both 32-bit and 64-bit targets. */
6169+
6170+#define FUNCTION_MODE SImode
6171+
6172+/* A C expression for the cost of a branch instruction. A value of 2
6173+ seems to minimize code size. */
6174+
6175+#define BRANCH_COST(speed_p, predictable_p) \
6176+ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
6177+
6178+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
6179+
6180+/* Control the assembler format that we output. */
6181+
6182+/* Output to assembler file text saying following lines
6183+ may contain character constants, extra white space, comments, etc. */
6184+
6185+#ifndef ASM_APP_ON
6186+#define ASM_APP_ON " #APP\n"
6187+#endif
6188+
6189+/* Output to assembler file text saying following lines
6190+ no longer contain unusual constructs. */
6191+
6192+#ifndef ASM_APP_OFF
6193+#define ASM_APP_OFF " #NO_APP\n"
6194+#endif
6195+
6196+#define REGISTER_NAMES \
6197+{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
6198+ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
6199+ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
6200+ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
6201+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
6202+ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
6203+ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
6204+ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
6205+ "arg", "frame", }
6206+
6207+#define ADDITIONAL_REGISTER_NAMES \
6208+{ \
6209+ { "x0", 0 + GP_REG_FIRST }, \
6210+ { "x1", 1 + GP_REG_FIRST }, \
6211+ { "x2", 2 + GP_REG_FIRST }, \
6212+ { "x3", 3 + GP_REG_FIRST }, \
6213+ { "x4", 4 + GP_REG_FIRST }, \
6214+ { "x5", 5 + GP_REG_FIRST }, \
6215+ { "x6", 6 + GP_REG_FIRST }, \
6216+ { "x7", 7 + GP_REG_FIRST }, \
6217+ { "x8", 8 + GP_REG_FIRST }, \
6218+ { "x9", 9 + GP_REG_FIRST }, \
6219+ { "x10", 10 + GP_REG_FIRST }, \
6220+ { "x11", 11 + GP_REG_FIRST }, \
6221+ { "x12", 12 + GP_REG_FIRST }, \
6222+ { "x13", 13 + GP_REG_FIRST }, \
6223+ { "x14", 14 + GP_REG_FIRST }, \
6224+ { "x15", 15 + GP_REG_FIRST }, \
6225+ { "x16", 16 + GP_REG_FIRST }, \
6226+ { "x17", 17 + GP_REG_FIRST }, \
6227+ { "x18", 18 + GP_REG_FIRST }, \
6228+ { "x19", 19 + GP_REG_FIRST }, \
6229+ { "x20", 20 + GP_REG_FIRST }, \
6230+ { "x21", 21 + GP_REG_FIRST }, \
6231+ { "x22", 22 + GP_REG_FIRST }, \
6232+ { "x23", 23 + GP_REG_FIRST }, \
6233+ { "x24", 24 + GP_REG_FIRST }, \
6234+ { "x25", 25 + GP_REG_FIRST }, \
6235+ { "x26", 26 + GP_REG_FIRST }, \
6236+ { "x27", 27 + GP_REG_FIRST }, \
6237+ { "x28", 28 + GP_REG_FIRST }, \
6238+ { "x29", 29 + GP_REG_FIRST }, \
6239+ { "x30", 30 + GP_REG_FIRST }, \
6240+ { "x31", 31 + GP_REG_FIRST }, \
6241+ { "f0", 0 + FP_REG_FIRST }, \
6242+ { "f1", 1 + FP_REG_FIRST }, \
6243+ { "f2", 2 + FP_REG_FIRST }, \
6244+ { "f3", 3 + FP_REG_FIRST }, \
6245+ { "f4", 4 + FP_REG_FIRST }, \
6246+ { "f5", 5 + FP_REG_FIRST }, \
6247+ { "f6", 6 + FP_REG_FIRST }, \
6248+ { "f7", 7 + FP_REG_FIRST }, \
6249+ { "f8", 8 + FP_REG_FIRST }, \
6250+ { "f9", 9 + FP_REG_FIRST }, \
6251+ { "f10", 10 + FP_REG_FIRST }, \
6252+ { "f11", 11 + FP_REG_FIRST }, \
6253+ { "f12", 12 + FP_REG_FIRST }, \
6254+ { "f13", 13 + FP_REG_FIRST }, \
6255+ { "f14", 14 + FP_REG_FIRST }, \
6256+ { "f15", 15 + FP_REG_FIRST }, \
6257+ { "f16", 16 + FP_REG_FIRST }, \
6258+ { "f17", 17 + FP_REG_FIRST }, \
6259+ { "f18", 18 + FP_REG_FIRST }, \
6260+ { "f19", 19 + FP_REG_FIRST }, \
6261+ { "f20", 20 + FP_REG_FIRST }, \
6262+ { "f21", 21 + FP_REG_FIRST }, \
6263+ { "f22", 22 + FP_REG_FIRST }, \
6264+ { "f23", 23 + FP_REG_FIRST }, \
6265+ { "f24", 24 + FP_REG_FIRST }, \
6266+ { "f25", 25 + FP_REG_FIRST }, \
6267+ { "f26", 26 + FP_REG_FIRST }, \
6268+ { "f27", 27 + FP_REG_FIRST }, \
6269+ { "f28", 28 + FP_REG_FIRST }, \
6270+ { "f29", 29 + FP_REG_FIRST }, \
6271+ { "f30", 30 + FP_REG_FIRST }, \
6272+ { "f31", 31 + FP_REG_FIRST }, \
6273+}
6274+
6275+/* Globalizing directive for a label. */
6276+#define GLOBAL_ASM_OP "\t.globl\t"
6277+
6278+/* This is how to store into the string LABEL
6279+ the symbol_ref name of an internal numbered label where
6280+ PREFIX is the class of label and NUM is the number within the class.
6281+ This is suitable for output with `assemble_name'. */
6282+
6283+#undef ASM_GENERATE_INTERNAL_LABEL
6284+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
6285+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
6286+
6287+/* This is how to output an element of a case-vector that is absolute. */
6288+
6289+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
6290+ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
6291+
6292+/* This is how to output an element of a PIC case-vector. */
6293+
6294+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
6295+ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
6296+ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
6297+
6298+/* This is how to output an assembler line
6299+ that says to advance the location counter
6300+ to a multiple of 2**LOG bytes. */
6301+
6302+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
6303+ fprintf (STREAM, "\t.align\t%d\n", (LOG))
6304+
6305+/* Define the strings to put out for each section in the object file. */
6306+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
6307+#define DATA_SECTION_ASM_OP "\t.data" /* large data */
6308+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
6309+#define BSS_SECTION_ASM_OP "\t.bss"
6310+#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
6311+#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
6312+
6313+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
6314+do \
6315+ { \
6316+ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
6317+ reg_names[STACK_POINTER_REGNUM], \
6318+ reg_names[STACK_POINTER_REGNUM], \
6319+ TARGET_64BIT ? "sd" : "sw", \
6320+ reg_names[REGNO], \
6321+ reg_names[STACK_POINTER_REGNUM]); \
6322+ } \
6323+while (0)
6324+
6325+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
6326+do \
6327+ { \
6328+ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
6329+ TARGET_64BIT ? "ld" : "lw", \
6330+ reg_names[REGNO], \
6331+ reg_names[STACK_POINTER_REGNUM], \
6332+ reg_names[STACK_POINTER_REGNUM], \
6333+ reg_names[STACK_POINTER_REGNUM]); \
6334+ } \
6335+while (0)
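/* Illustrative note: on RV64, ASM_OUTPUT_REG_PUSH (stream, 10) emits
       addi sp,sp,-8
       sd a0,0(sp)
   and ASM_OUTPUT_REG_POP (stream, 10) emits the matching
       ld a0,0(sp)
       addi sp,sp,8
   (register 10 is a0 in REGISTER_NAMES above). */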
6336+
6337+#define ASM_COMMENT_START "#"
6338+
6339+#undef SIZE_TYPE
6340+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
6341+
6342+#undef PTRDIFF_TYPE
6343+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
6344+
6345+/* The maximum number of bytes that can be copied by one iteration of
6346+ a movmemsi loop; see riscv_block_move_loop. */
6347+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
6348+
6349+/* The maximum number of bytes that can be copied by a straight-line
6350+ implementation of movmemsi; see riscv_block_move_straight. We want
6351+ to make sure that any loop-based implementation will iterate at
6352+ least twice. */
6353+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
6354+
6355+/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
6356+
6357+#define RISCV_CALL_RATIO 6
6358+
6359+/* Any loop-based implementation of movmemsi will have at least
6360+ RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
6361+ moves, so allow individual copies of fewer elements.
6362+
6363+ When movmemsi is not available, use a value approximating
6364+ the length of a memcpy call sequence, so that move_by_pieces
6365+ will generate inline code if it is shorter than a function call.
6366+ Since move_by_pieces_ninsns counts memory-to-memory moves, and
6367+ we'll have to generate a load/store pair for each, halve the
6368+ value of RISCV_CALL_RATIO to take that into account. */
6369+
6370+#define MOVE_RATIO(speed) \
6371+ (HAVE_movmemsi \
6372+ ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
6373+ : RISCV_CALL_RATIO / 2)
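/* Worked example: on RV64 (UNITS_PER_WORD == 8, MOVE_MAX == 8),
   RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER is 32 and
   RISCV_MAX_MOVE_BYTES_STRAIGHT is 64, so MOVE_RATIO is 64 / 8 = 8 when
   movmemsi is available and RISCV_CALL_RATIO / 2 == 3 otherwise. */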
6374+
6375+/* movmemsi is meant to generate code that is at least as good as
6376+ move_by_pieces. However, movmemsi effectively uses a by-pieces
6377+ implementation both for moves smaller than a word and for word-aligned
6378+ moves of no more than RISCV_MAX_MOVE_BYTES_STRAIGHT bytes. We should
6379+ allow the tree-level optimisers to do such moves by pieces, as it
6380+ often exposes other optimization opportunities. We might as well
6381+ continue to use movmemsi at the rtl level though, as it produces
6382+ better code when scheduling is disabled (such as at -O). */
6383+
6384+#define MOVE_BY_PIECES_P(SIZE, ALIGN) \
6385+ (HAVE_movmemsi \
6386+ ? (!currently_expanding_to_rtl \
6387+ && ((ALIGN) < BITS_PER_WORD \
6388+ ? (SIZE) < UNITS_PER_WORD \
6389+ : (SIZE) <= RISCV_MAX_MOVE_BYTES_STRAIGHT)) \
6390+ : (move_by_pieces_ninsns (SIZE, ALIGN, MOVE_MAX_PIECES + 1) \
6391+ < (unsigned int) MOVE_RATIO (false)))
6392+
6393+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
6394+ of the length of a memset call, but use the default otherwise. */
6395+
6396+#define CLEAR_RATIO(speed)\
6397+ ((speed) ? 15 : RISCV_CALL_RATIO)
6398+
6399+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
6400+ optimizing for size adjust the ratio to account for the overhead of
6401+ loading the constant and replicating it across the word. */
6402+
6403+#define SET_RATIO(speed) \
6404+ ((speed) ? 15 : RISCV_CALL_RATIO - 2)
6405+
6406+/* STORE_BY_PIECES_P can be used when copying a constant string, but
6407+ in that case each word takes 3 insns (lui, ori, sw), or more in
6408+ 64-bit mode, instead of 2 (lw, sw). For now we always fail this
6409+ and let the move_by_pieces code copy the string from read-only
6410+ memory. In the future, this could be tuned further for multi-issue
6411+ CPUs that can issue stores down one pipe and arithmetic instructions
6412+ down another; in that case, the lui/ori/sw combination would be a
6413+ win for long enough strings. */
6414+
6415+#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
6416+
6417+#ifndef HAVE_AS_TLS
6418+#define HAVE_AS_TLS 0
6419+#endif
6420+
6421+#ifndef USED_FOR_TARGET
6422+
6423+extern const enum reg_class riscv_regno_to_class[];
6424+extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
6425+extern const char* riscv_hi_relocs[];
6426+#endif
6427+
6428+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
6429+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
6430diff -urN original-gcc/gcc/config/riscv/riscv.md gcc/gcc/config/riscv/riscv.md
6431--- original-gcc/gcc/config/riscv/riscv.md 1970-01-01 01:00:00.000000000 +0100
6432+++ gcc-4.9.2/gcc/config/riscv/riscv.md 2015-03-07 09:51:45.667139025 +0100
6433@@ -0,0 +1,2423 @@
6434+;; Machine description for RISC-V for GNU compiler.
6435+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
6436+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
6437+;; Based on MIPS target for GNU compiler.
6438+
6439+;; This file is part of GCC.
6440+
6441+;; GCC is free software; you can redistribute it and/or modify
6442+;; it under the terms of the GNU General Public License as published by
6443+;; the Free Software Foundation; either version 3, or (at your option)
6444+;; any later version.
6445+
6446+;; GCC is distributed in the hope that it will be useful,
6447+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
6448+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6449+;; GNU General Public License for more details.
6450+
6451+;; You should have received a copy of the GNU General Public License
6452+;; along with GCC; see the file COPYING3. If not see
6453+;; <http://www.gnu.org/licenses/>.
6454+
6455+(define_c_enum "unspec" [
6456+ ;; Floating-point moves.
6457+ UNSPEC_LOAD_LOW
6458+ UNSPEC_LOAD_HIGH
6459+ UNSPEC_STORE_WORD
6460+
6461+ ;; GP manipulation.
6462+ UNSPEC_EH_RETURN
6463+
6464+ ;; Symbolic accesses.
6465+ UNSPEC_ADDRESS_FIRST
6466+ UNSPEC_LOAD_GOT
6467+ UNSPEC_TLS
6468+ UNSPEC_TLS_LE
6469+ UNSPEC_TLS_IE
6470+ UNSPEC_TLS_GD
6471+
6472+ ;; Blockage and synchronisation.
6473+ UNSPEC_BLOCKAGE
6474+ UNSPEC_FENCE
6475+ UNSPEC_FENCE_I
6476+])
6477+
6478+(define_constants
6479+ [(RETURN_ADDR_REGNUM 1)
6480+])
6481+
6482+(include "predicates.md")
6483+(include "constraints.md")
6484+
6485+;; ....................
6486+;;
6487+;; Attributes
6488+;;
6489+;; ....................
6490+
6491+(define_attr "got" "unset,xgot_high,load"
6492+ (const_string "unset"))
6493+
6494+;; For jal instructions, this attribute is DIRECT when the target address
6495+;; is symbolic and INDIRECT when it is a register.
6496+(define_attr "jal" "unset,direct,indirect"
6497+ (const_string "unset"))
6498+
6499+;; Classification of moves, extensions and truncations. Most values
6500+;; are as for "type" (see below) but there are also the following
6501+;; move-specific values:
6502+;;
6503+;; andi a single ANDI instruction
6504+;; shift_shift a shift left followed by a shift right
6505+;;
6506+;; This attribute is used to determine the instruction's length and
6507+;; scheduling type. For doubleword moves, the attribute always describes
6508+;; the split instructions; in some cases, it is more appropriate for the
6509+;; scheduling type to be "multi" instead.
6510+(define_attr "move_type"
6511+ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
6512+ const,logical,arith,andi,shift_shift"
6513+ (const_string "unknown"))
6514+
6515+(define_attr "alu_type" "unknown,add,sub,and,or,xor"
6516+ (const_string "unknown"))
6517+
6518+;; Main data type used by the insn
6519+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
6520+ (const_string "unknown"))
6521+
6522+;; True if the main data type is twice the size of a word.
6523+(define_attr "dword_mode" "no,yes"
6524+ (cond [(and (eq_attr "mode" "DI,DF")
6525+ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
6526+ (const_string "yes")
6527+
6528+ (and (eq_attr "mode" "TI,TF")
6529+ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
6530+ (const_string "yes")]
6531+ (const_string "no")))
6532+
6533+;; Classification of each insn.
6534+;; branch conditional branch
6535+;; jump unconditional jump
6536+;; call unconditional call
6537+;; load load instruction(s)
6538+;; fpload floating point load
6539+;; fpidxload floating point indexed load
6540+;; store store instruction(s)
6541+;; fpstore floating point store
6542+;; fpidxstore floating point indexed store
6543+;; mtc transfer to coprocessor
6544+;; mfc transfer from coprocessor
6545+;; const load constant
6546+;; arith integer arithmetic instructions
6547+;; logical integer logical instructions
6548+;; shift integer shift instructions
6549+;; slt set less than instructions
6550+;; imul integer multiply
6551+;; idiv integer divide
6552+;; move integer register move (addi rd, rs1, 0)
6553+;; fmove floating point register move
6554+;; fadd floating point add/subtract
6555+;; fmul floating point multiply
6556+;; fmadd floating point multiply-add
6557+;; fdiv floating point divide
6558+;; fcmp floating point compare
6559+;; fcvt floating point convert
6560+;; fsqrt floating point square root
6561+;; multi multiword sequence (or user asm statements)
6562+;; nop no operation
6563+;; ghost an instruction that produces no real code
6564+(define_attr "type"
6565+ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
6566+ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
6567+ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
6568+ (cond [(eq_attr "jal" "!unset") (const_string "call")
6569+ (eq_attr "got" "load") (const_string "load")
6570+
6571+ (eq_attr "alu_type" "add,sub") (const_string "arith")
6572+
6573+ (eq_attr "alu_type" "and,or,xor") (const_string "logical")
6574+
6575+ ;; If a doubleword move uses these expensive instructions,
6576+ ;; it is usually better to schedule them in the same way
6577+ ;; as the singleword form, rather than as "multi".
6578+ (eq_attr "move_type" "load") (const_string "load")
6579+ (eq_attr "move_type" "fpload") (const_string "fpload")
6580+ (eq_attr "move_type" "store") (const_string "store")
6581+ (eq_attr "move_type" "fpstore") (const_string "fpstore")
6582+ (eq_attr "move_type" "mtc") (const_string "mtc")
6583+ (eq_attr "move_type" "mfc") (const_string "mfc")
6584+
6585+ ;; These types of move are always single insns.
6586+ (eq_attr "move_type" "fmove") (const_string "fmove")
6587+ (eq_attr "move_type" "arith") (const_string "arith")
6588+ (eq_attr "move_type" "logical") (const_string "logical")
6589+ (eq_attr "move_type" "andi") (const_string "logical")
6590+
6591+ ;; These types of move are always split.
6592+ (eq_attr "move_type" "shift_shift")
6593+ (const_string "multi")
6594+
6595+ ;; These types of move are split for doubleword modes only.
6596+ (and (eq_attr "move_type" "move,const")
6597+ (eq_attr "dword_mode" "yes"))
6598+ (const_string "multi")
6599+ (eq_attr "move_type" "move") (const_string "move")
6600+ (eq_attr "move_type" "const") (const_string "const")]
6601+ (const_string "unknown")))
6602+
6603+;; Mode for conversion types (fcvt)
6604+;; I2S integer to float single (SI/DI to SF)
6605+;; I2D integer to float double (SI/DI to DF)
6606+;; S2I float to integer (SF to SI/DI)
6607+;; D2I float to integer (DF to SI/DI)
6608+;; D2S double to float single
6609+;; S2D float single to double
6610+
6611+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
6612+ (const_string "unknown"))
6613+
6614+;; Length of instruction in bytes.
6615+(define_attr "length" ""
6616+ (cond [
6617+ ;; Direct branch instructions have a range of [-0x1000,0xffc],
6618+ ;; relative to the address of the branch instruction. If a branch is
6619+ ;; outside this range, convert a branch like:
6620+ ;;
6621+ ;; bne r1,r2,target
6622+ ;;
6623+ ;; to:
6624+ ;;
6625+ ;; beq r1,r2,1f
6626+ ;; j target
6627+ ;; 1:
6628+ ;;
6629+ (eq_attr "type" "branch")
6630+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
6631+ (le (minus (pc) (match_dup 0)) (const_int 4092)))
6632+ (const_int 4)
6633+ (const_int 8))
6634+
6635+ ;; Conservatively assume calls take two instructions, as in:
6636+ ;; auipc t0, %pcrel_hi(target)
6637+ ;; jalr ra, t0, %lo(target)
6638+ ;; The linker will relax these into JAL when appropriate.
6639+ (eq_attr "type" "call")
6640+ (const_int 8)
6641+
6642+ ;; "Ghost" instructions occupy no space.
6643+ (eq_attr "type" "ghost")
6644+ (const_int 0)
6645+
6646+ (eq_attr "got" "load") (const_int 8)
6647+
6648+ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
6649+ (eq_attr "move_type" "shift_shift")
6650+ (const_int 8)
6651+
6652+ ;; Check for doubleword moves that are decomposed into two
6653+ ;; instructions.
6654+ (and (eq_attr "move_type" "mtc,mfc,move")
6655+ (eq_attr "dword_mode" "yes"))
6656+ (const_int 8)
6657+
6658+ ;; Doubleword CONST{,N} moves are split into two word
6659+ ;; CONST{,N} moves.
6660+ (and (eq_attr "move_type" "const")
6661+ (eq_attr "dword_mode" "yes"))
6662+ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
6663+
6664+ ;; Otherwise, constants, loads and stores are handled by external
6665+ ;; routines.
6666+ (eq_attr "move_type" "load,fpload")
6667+ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
6668+ (eq_attr "move_type" "store,fpstore")
6669+ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
6670+ ] (const_int 4)))
6671+
6672+;; Describe a user's asm statement.
6673+(define_asm_attributes
6674+ [(set_attr "type" "multi")])
6675+
6676+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
6677+;; from the same template.
6678+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
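+;; For example, a template named "add<mode>3" written with :GPR yields an
+;; addsi3 pattern unconditionally and an adddi3 pattern guarded by
+;; TARGET_64BIT.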
6679+(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
6680+
6681+;; A copy of GPR that can be used when a pattern has two independent
6682+;; modes.
6683+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
6684+
6685+;; This mode iterator allows :P to be used for patterns that operate on
6686+;; pointer-sized quantities. Exactly one of the two alternatives will match.
6687+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
6688+
6689+;; 32-bit integer moves for which we provide move patterns.
6690+(define_mode_iterator IMOVE32 [SI])
6691+
6692+;; 64-bit modes for which we provide move patterns.
6693+(define_mode_iterator MOVE64 [DI DF])
6694+
6695+;; 128-bit modes for which we provide move patterns on 64-bit targets.
6696+(define_mode_iterator MOVE128 [TI TF])
6697+
6698+;; This mode iterator allows the QI and HI extension patterns to be
6699+;; defined from the same template.
6700+(define_mode_iterator SHORT [QI HI])
6701+
6702+;; Likewise the 64-bit truncate-and-shift patterns.
6703+(define_mode_iterator SUBDI [QI HI SI])
6704+(define_mode_iterator HISI [HI SI])
6705+(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
6706+
6707+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
6708+;; floating-point mode is allowed.
6709+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
6710+ (DF "TARGET_HARD_FLOAT")])
6711+(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
6712+ (SF "TARGET_HARD_FLOAT")
6713+ (DF "TARGET_HARD_FLOAT")])
6714+
6715+;; Like ANYF, but only applies to scalar modes.
6716+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
6717+ (DF "TARGET_HARD_FLOAT")])
6718+
6719+;; A floating-point mode for which moves involving FPRs may need to be split.
6720+(define_mode_iterator SPLITF
6721+ [(DF "!TARGET_64BIT")
6722+ (DI "!TARGET_64BIT")
6723+ (TF "TARGET_64BIT")])
6724+
6725+;; This attribute gives the length suffix for a sign- or zero-extension
6726+;; instruction.
6727+(define_mode_attr size [(QI "b") (HI "h")])
6728+
6729+;; Mode attributes for loads.
6730+(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
6731+
6732+;; Instruction names for stores.
6733+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
6734+
6735+;; This attribute gives the best constraint to use for registers of
6736+;; a given mode.
6737+(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
6738+
6739+;; This attribute gives the format suffix for floating-point operations.
6740+(define_mode_attr fmt [(SF "s") (DF "d")])
6741+
6742+;; This attribute gives the format suffix for atomic memory operations.
6743+(define_mode_attr amo [(SI "w") (DI "d")])
6744+
6745+;; This attribute gives the upper-case mode name for one unit of a
6746+;; floating-point mode.
6747+(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
6748+
6749+;; This attribute gives the integer mode that has half the size of
6750+;; the controlling mode.
6751+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
6752+
6753+;; This code iterator allows signed and unsigned widening multiplications
6754+;; to use the same template.
6755+(define_code_iterator any_extend [sign_extend zero_extend])
6756+
6757+;; This code iterator allows the two right shift instructions to be
6758+;; generated from the same template.
6759+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
6760+
6761+;; This code iterator allows the three shift instructions to be generated
6762+;; from the same template.
6763+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
6764+
6765+;; This code iterator allows unsigned and signed division to be generated
6766+;; from the same template.
6767+(define_code_iterator any_div [div udiv])
6768+
6769+;; This code iterator allows unsigned and signed modulus to be generated
6770+;; from the same template.
6771+(define_code_iterator any_mod [mod umod])
6772+
6773+;; These code iterators allow the signed and unsigned scc operations to use
6774+;; the same template.
6775+(define_code_iterator any_gt [gt gtu])
6776+(define_code_iterator any_ge [ge geu])
6777+(define_code_iterator any_lt [lt ltu])
6778+(define_code_iterator any_le [le leu])
6779+
6780+;; <u> expands to an empty string when doing a signed operation and
6781+;; "u" when doing an unsigned operation.
6782+(define_code_attr u [(sign_extend "") (zero_extend "u")
6783+ (div "") (udiv "u")
6784+ (mod "") (umod "u")
6785+ (gt "") (gtu "u")
6786+ (ge "") (geu "u")
6787+ (lt "") (ltu "u")
6788+ (le "") (leu "u")])
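+;; For example, "<u>divsi3" with the any_div iterator produces the divsi3 and
+;; udivsi3 patterns, and the "div<u>" output template prints "div" or "divu"
+;; accordingly.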
6789+
6790+;; <su> is like <u>, but the signed form expands to "s" rather than "".
6791+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
6792+
6793+;; <optab> expands to the name of the optab for a particular code.
6794+(define_code_attr optab [(ashift "ashl")
6795+ (ashiftrt "ashr")
6796+ (lshiftrt "lshr")
6797+ (ior "ior")
6798+ (xor "xor")
6799+ (and "and")
6800+ (plus "add")
6801+ (minus "sub")])
6802+
6803+;; <insn> expands to the name of the insn that implements a particular code.
6804+(define_code_attr insn [(ashift "sll")
6805+ (ashiftrt "sra")
6806+ (lshiftrt "srl")
6807+ (ior "or")
6808+ (xor "xor")
6809+ (and "and")
6810+ (plus "add")
6811+ (minus "sub")])
6812+
6813+;; Pipeline descriptions.
6814+;;
6815+;; generic.md provides a fallback for processors without a specific
6816+;; pipeline description. It is derived from the old define_function_unit
6817+;; version and uses the "alu" and "imuldiv" units declared below.
6818+;;
6819+;; Some of the processor-specific files are also derived from old
6820+;; define_function_unit descriptions and simply override the parts of
6821+;; generic.md that don't apply. The other processor-specific files
6822+;; are self-contained.
6823+(define_automaton "alu,imuldiv")
6824+
6825+(define_cpu_unit "alu" "alu")
6826+(define_cpu_unit "imuldiv" "imuldiv")
6827+
6828+;; Ghost instructions produce no real code and introduce no hazards.
6829+;; They exist purely to express an effect on dataflow.
6830+(define_insn_reservation "ghost" 0
6831+ (eq_attr "type" "ghost")
6832+ "nothing")
6833+
6834+(include "generic.md")
6835+
6836+;;
6837+;; ....................
6838+;;
6839+;; ADDITION
6840+;;
6841+;; ....................
6842+;;
6843+
6844+(define_insn "add<mode>3"
6845+ [(set (match_operand:ANYF 0 "register_operand" "=f")
6846+ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
6847+ (match_operand:ANYF 2 "register_operand" "f")))]
6848+ ""
6849+ "fadd.<fmt>\t%0,%1,%2"
6850+ [(set_attr "type" "fadd")
6851+ (set_attr "mode" "<UNITMODE>")])
6852+
6853+(define_expand "add<mode>3"
6854+ [(set (match_operand:GPR 0 "register_operand")
6855+ (plus:GPR (match_operand:GPR 1 "register_operand")
6856+ (match_operand:GPR 2 "arith_operand")))]
6857+ "")
6858+
6859+(define_insn "*addsi3"
6860+ [(set (match_operand:SI 0 "register_operand" "=r,r")
6861+ (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
6862+ (match_operand:GPR2 2 "arith_operand" "r,Q")))]
6863+ ""
6864+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
6865+ [(set_attr "type" "arith")
6866+ (set_attr "mode" "SI")])
6867+
6868+(define_insn "*adddi3"
6869+ [(set (match_operand:DI 0 "register_operand" "=r,r")
6870+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
6871+ (match_operand:DI 2 "arith_operand" "r,Q")))]
6872+ "TARGET_64BIT"
6873+ "add\t%0,%1,%2"
6874+ [(set_attr "type" "arith")
6875+ (set_attr "mode" "DI")])
6876+
6877+(define_insn "*addsi3_extended"
6878+ [(set (match_operand:DI 0 "register_operand" "=r,r")
6879+ (sign_extend:DI
6880+ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
6881+ (match_operand:SI 2 "arith_operand" "r,Q"))))]
6882+ "TARGET_64BIT"
6883+ "addw\t%0,%1,%2"
6884+ [(set_attr "type" "arith")
6885+ (set_attr "mode" "SI")])
6886+
6887+(define_insn "*adddisi3"
6888+ [(set (match_operand:SI 0 "register_operand" "=r,r")
6889+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
6890+ (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
6891+ "TARGET_64BIT"
6892+ "addw\t%0,%1,%2"
6893+ [(set_attr "type" "arith")
6894+ (set_attr "mode" "SI")])
6895+
6896+(define_insn "*adddisisi3"
6897+ [(set (match_operand:SI 0 "register_operand" "=r,r")
6898+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
6899+ (match_operand:SI 2 "arith_operand" "r,Q")))]
6900+ "TARGET_64BIT"
6901+ "addw\t%0,%1,%2"
6902+ [(set_attr "type" "arith")
6903+ (set_attr "mode" "SI")])
6904+
6905+(define_insn "*adddi3_truncsi"
6906+ [(set (match_operand:SI 0 "register_operand" "=r,r")
6907+ (truncate:SI
6908+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
6909+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
6910+ "TARGET_64BIT"
6911+ "addw\t%0,%1,%2"
6912+ [(set_attr "type" "arith")
6913+ (set_attr "mode" "SI")])
6914+
6915+;;
6916+;; ....................
6917+;;
6918+;; SUBTRACTION
6919+;;
6920+;; ....................
6921+;;
6922+
6923+(define_insn "sub<mode>3"
6924+ [(set (match_operand:ANYF 0 "register_operand" "=f")
6925+ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
6926+ (match_operand:ANYF 2 "register_operand" "f")))]
6927+ ""
6928+ "fsub.<fmt>\t%0,%1,%2"
6929+ [(set_attr "type" "fadd")
6930+ (set_attr "mode" "<UNITMODE>")])
6931+
6932+(define_expand "sub<mode>3"
6933+ [(set (match_operand:GPR 0 "register_operand")
6934+ (minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
6935+ (match_operand:GPR 2 "register_operand")))]
6936+ "")
6937+
6938+(define_insn "*subdi3"
6939+ [(set (match_operand:DI 0 "register_operand" "=r")
6940+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
6941+ (match_operand:DI 2 "register_operand" "r")))]
6942+ "TARGET_64BIT"
6943+ "sub\t%0,%z1,%2"
6944+ [(set_attr "type" "arith")
6945+ (set_attr "mode" "DI")])
6946+
6947+(define_insn "*subsi3"
6948+ [(set (match_operand:SI 0 "register_operand" "=r")
6949+ (minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
6950+ (match_operand:GPR2 2 "register_operand" "r")))]
6951+ ""
6952+ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
6953+ [(set_attr "type" "arith")
6954+ (set_attr "mode" "SI")])
6955+
6956+(define_insn "*subsi3_extended"
6957+ [(set (match_operand:DI 0 "register_operand" "=r")
6958+ (sign_extend:DI
6959+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
6960+ (match_operand:SI 2 "register_operand" "r"))))]
6961+ "TARGET_64BIT"
6962+ "subw\t%0,%z1,%2"
6963+ [(set_attr "type" "arith")
6964+ (set_attr "mode" "DI")])
6965+
6966+(define_insn "*subdisi3"
6967+ [(set (match_operand:SI 0 "register_operand" "=r")
6968+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
6969+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
6970+ "TARGET_64BIT"
6971+ "subw\t%0,%z1,%2"
6972+ [(set_attr "type" "arith")
6973+ (set_attr "mode" "SI")])
6974+
6975+(define_insn "*subdisisi3"
6976+ [(set (match_operand:SI 0 "register_operand" "=r")
6977+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
6978+ (match_operand:SI 2 "register_operand" "r")))]
6979+ "TARGET_64BIT"
6980+ "subw\t%0,%z1,%2"
6981+ [(set_attr "type" "arith")
6982+ (set_attr "mode" "SI")])
6983+
6984+(define_insn "*subsidisi3"
6985+ [(set (match_operand:SI 0 "register_operand" "=r")
6986+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
6987+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
6988+ "TARGET_64BIT"
6989+ "subw\t%0,%z1,%2"
6990+ [(set_attr "type" "arith")
6991+ (set_attr "mode" "SI")])
6992+
6993+(define_insn "*subdi3_truncsi"
6994+ [(set (match_operand:SI 0 "register_operand" "=r,r")
6995+ (truncate:SI
6996+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
6997+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
6998+ "TARGET_64BIT"
6999+ "subw\t%0,%z1,%2"
7000+ [(set_attr "type" "arith")
7001+ (set_attr "mode" "SI")])
7002+
7003+;;
7004+;; ....................
7005+;;
7006+;; MULTIPLICATION
7007+;;
7008+;; ....................
7009+;;
7010+
7011+(define_insn "mul<mode>3"
7012+ [(set (match_operand:SCALARF 0 "register_operand" "=f")
7013+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
7014+ (match_operand:SCALARF 2 "register_operand" "f")))]
7015+ ""
7016+ "fmul.<fmt>\t%0,%1,%2"
7017+ [(set_attr "type" "fmul")
7018+ (set_attr "mode" "<UNITMODE>")])
7019+
7020+(define_expand "mul<mode>3"
7021+ [(set (match_operand:GPR 0 "register_operand")
7022+ (mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
7023+ (match_operand:GPR 2 "register_operand")))]
7024+ "TARGET_MULDIV")
7025+
7026+(define_insn "*mulsi3"
7027+ [(set (match_operand:SI 0 "register_operand" "=r")
7028+ (mult:SI (match_operand:GPR 1 "register_operand" "r")
7029+ (match_operand:GPR2 2 "register_operand" "r")))]
7030+ "TARGET_MULDIV"
7031+ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
7032+ [(set_attr "type" "imul")
7033+ (set_attr "mode" "SI")])
7034+
7035+(define_insn "*muldisi3"
7036+ [(set (match_operand:SI 0 "register_operand" "=r")
7037+ (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
7038+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
7039+ "TARGET_MULDIV && TARGET_64BIT"
7040+ "mulw\t%0,%1,%2"
7041+ [(set_attr "type" "imul")
7042+ (set_attr "mode" "SI")])
7043+
7044+(define_insn "*muldi3_truncsi"
7045+ [(set (match_operand:SI 0 "register_operand" "=r")
7046+ (truncate:SI
7047+ (mult:DI (match_operand:DI 1 "register_operand" "r")
7048+ (match_operand:DI 2 "register_operand" "r"))))]
7049+ "TARGET_MULDIV && TARGET_64BIT"
7050+ "mulw\t%0,%1,%2"
7051+ [(set_attr "type" "imul")
7052+ (set_attr "mode" "SI")])
7053+
7054+(define_insn "*muldi3"
7055+ [(set (match_operand:DI 0 "register_operand" "=r")
7056+ (mult:DI (match_operand:DI 1 "register_operand" "r")
7057+ (match_operand:DI 2 "register_operand" "r")))]
7058+ "TARGET_MULDIV && TARGET_64BIT"
7059+ "mul\t%0,%1,%2"
7060+ [(set_attr "type" "imul")
7061+ (set_attr "mode" "DI")])
7062+
7063+;;
7064+;; ........................
7065+;;
7066+;; MULTIPLICATION HIGH-PART
7067+;;
7068+;; ........................
7069+;;
7070+
7071+
7072+;; Using a clobber here is crude, but a better approach is not obvious.
7073+(define_insn_and_split "<u>mulditi3"
7074+ [(set (match_operand:TI 0 "register_operand" "=r")
7075+ (mult:TI (any_extend:TI
7076+ (match_operand:DI 1 "register_operand" "r"))
7077+ (any_extend:TI
7078+ (match_operand:DI 2 "register_operand" "r"))))
7079+ (clobber (match_scratch:DI 3 "=r"))]
7080+ "TARGET_MULDIV && TARGET_64BIT"
7081+ "#"
7082+ "reload_completed"
7083+ [
7084+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
7085+ (set (match_dup 4) (truncate:DI
7086+ (lshiftrt:TI
7087+ (mult:TI (any_extend:TI (match_dup 1))
7088+ (any_extend:TI (match_dup 2)))
7089+ (const_int 64))))
7090+ (set (match_dup 5) (match_dup 3))
7091+ ]
7092+{
7093+ operands[4] = riscv_subword (operands[0], true);
7094+ operands[5] = riscv_subword (operands[0], false);
7095+}
7096+ )
7097+
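+;; The highpart patterns below rely on the RISC-V MULH/MULHU instructions,
+;; which return the upper XLEN bits of the full 2*XLEN-bit product; this is
+;; what the (lshiftrt ... (const_int 64)) of the widened product expresses.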
7098+(define_insn "<u>muldi3_highpart"
7099+ [(set (match_operand:DI 0 "register_operand" "=r")
7100+ (truncate:DI
7101+ (lshiftrt:TI
7102+ (mult:TI (any_extend:TI
7103+ (match_operand:DI 1 "register_operand" "r"))
7104+ (any_extend:TI
7105+ (match_operand:DI 2 "register_operand" "r")))
7106+ (const_int 64))))]
7107+ "TARGET_MULDIV && TARGET_64BIT"
7108+ "mulh<u>\t%0,%1,%2"
7109+ [(set_attr "type" "imul")
7110+ (set_attr "mode" "DI")])
7111+
7112+
7113+(define_insn_and_split "usmulditi3"
7114+ [(set (match_operand:TI 0 "register_operand" "=r")
7115+ (mult:TI (zero_extend:TI
7116+ (match_operand:DI 1 "register_operand" "r"))
7117+ (sign_extend:TI
7118+ (match_operand:DI 2 "register_operand" "r"))))
7119+ (clobber (match_scratch:DI 3 "=r"))]
7120+ "TARGET_MULDIV && TARGET_64BIT"
7121+ "#"
7122+ "reload_completed"
7123+ [
7124+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
7125+ (set (match_dup 4) (truncate:DI
7126+ (lshiftrt:TI
7127+ (mult:TI (zero_extend:TI (match_dup 1))
7128+ (sign_extend:TI (match_dup 2)))
7129+ (const_int 64))))
7130+ (set (match_dup 5) (match_dup 3))
7131+ ]
7132+{
7133+ operands[4] = riscv_subword (operands[0], true);
7134+ operands[5] = riscv_subword (operands[0], false);
7135+}
7136+ )
7137+
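+;; MULHSU treats rs1 as signed and rs2 as unsigned, whereas the pattern below
+;; zero-extends operand 1 and sign-extends operand 2, so the output template
+;; swaps the operands ("mulhsu\t%0,%2,%1").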
7138+(define_insn "usmuldi3_highpart"
7139+ [(set (match_operand:DI 0 "register_operand" "=r")
7140+ (truncate:DI
7141+ (lshiftrt:TI
7142+ (mult:TI (zero_extend:TI
7143+ (match_operand:DI 1 "register_operand" "r"))
7144+ (sign_extend:TI
7145+ (match_operand:DI 2 "register_operand" "r")))
7146+ (const_int 64))))]
7147+ "TARGET_MULDIV && TARGET_64BIT"
7148+ "mulhsu\t%0,%2,%1"
7149+ [(set_attr "type" "imul")
7150+ (set_attr "mode" "DI")])
7151+
7152+(define_expand "<u>mulsidi3"
7153+ [(set (match_operand:DI 0 "register_operand" "=r")
7154+ (mult:DI (any_extend:DI
7155+ (match_operand:SI 1 "register_operand" "r"))
7156+ (any_extend:DI
7157+ (match_operand:SI 2 "register_operand" "r"))))
7158+ (clobber (match_scratch:SI 3 "=r"))]
7159+ "TARGET_MULDIV && !TARGET_64BIT"
7160+{
7161+ rtx temp = gen_reg_rtx (SImode);
7162+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
7163+ emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
7164+ operands[1], operands[2]));
7165+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
7166+ DONE;
7167+}
7168+ )
7169+
7170+(define_insn "<u>mulsi3_highpart"
7171+ [(set (match_operand:SI 0 "register_operand" "=r")
7172+ (truncate:SI
7173+ (lshiftrt:DI
7174+ (mult:DI (any_extend:DI
7175+ (match_operand:SI 1 "register_operand" "r"))
7176+ (any_extend:DI
7177+ (match_operand:SI 2 "register_operand" "r")))
7178+ (const_int 32))))]
7179+ "TARGET_MULDIV && !TARGET_64BIT"
7180+ "mulh<u>\t%0,%1,%2"
7181+ [(set_attr "type" "imul")
7182+ (set_attr "mode" "SI")])
7183+
7184+
7185+(define_expand "usmulsidi3"
7186+ [(set (match_operand:DI 0 "register_operand" "=r")
7187+ (mult:DI (zero_extend:DI
7188+ (match_operand:SI 1 "register_operand" "r"))
7189+ (sign_extend:DI
7190+ (match_operand:SI 2 "register_operand" "r"))))
7191+ (clobber (match_scratch:SI 3 "=r"))]
7192+ "TARGET_MULDIV && !TARGET_64BIT"
7193+{
7194+ rtx temp = gen_reg_rtx (SImode);
7195+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
7196+ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
7197+ operands[1], operands[2]));
7198+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
7199+ DONE;
7200+}
7201+ )
7202+
7203+(define_insn "usmulsi3_highpart"
7204+ [(set (match_operand:SI 0 "register_operand" "=r")
7205+ (truncate:SI
7206+ (lshiftrt:DI
7207+ (mult:DI (zero_extend:DI
7208+ (match_operand:SI 1 "register_operand" "r"))
7209+ (sign_extend:DI
7210+ (match_operand:SI 2 "register_operand" "r")))
7211+ (const_int 32))))]
7212+ "TARGET_MULDIV && !TARGET_64BIT"
7213+ "mulhsu\t%0,%2,%1"
7214+ [(set_attr "type" "imul")
7215+ (set_attr "mode" "SI")])
7216+
7217+;;
7218+;; ....................
7219+;;
7220+;; DIVISION and REMAINDER
7221+;;
7222+;; ....................
7223+;;
7224+
7225+(define_insn "<u>divsi3"
7226+ [(set (match_operand:SI 0 "register_operand" "=r")
7227+ (any_div:SI (match_operand:SI 1 "register_operand" "r")
7228+ (match_operand:SI 2 "register_operand" "r")))]
7229+ "TARGET_MULDIV"
7230+ { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
7231+ [(set_attr "type" "idiv")
7232+ (set_attr "mode" "SI")])
7233+
7234+(define_insn "<u>divdi3"
7235+ [(set (match_operand:DI 0 "register_operand" "=r")
7236+ (any_div:DI (match_operand:DI 1 "register_operand" "r")
7237+ (match_operand:DI 2 "register_operand" "r")))]
7238+ "TARGET_MULDIV && TARGET_64BIT"
7239+ "div<u>\t%0,%1,%2"
7240+ [(set_attr "type" "idiv")
7241+ (set_attr "mode" "DI")])
7242+
7243+(define_insn "<u>modsi3"
7244+ [(set (match_operand:SI 0 "register_operand" "=r")
7245+ (any_mod:SI (match_operand:SI 1 "register_operand" "r")
7246+ (match_operand:SI 2 "register_operand" "r")))]
7247+ "TARGET_MULDIV"
7248+ { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
7249+ [(set_attr "type" "idiv")
7250+ (set_attr "mode" "SI")])
7251+
7252+(define_insn "<u>moddi3"
7253+ [(set (match_operand:DI 0 "register_operand" "=r")
7254+ (any_mod:DI (match_operand:DI 1 "register_operand" "r")
7255+ (match_operand:DI 2 "register_operand" "r")))]
7256+ "TARGET_MULDIV && TARGET_64BIT"
7257+ "rem<u>\t%0,%1,%2"
7258+ [(set_attr "type" "idiv")
7259+ (set_attr "mode" "DI")])
7260+
7261+(define_insn "div<mode>3"
7262+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7263+ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
7264+ (match_operand:ANYF 2 "register_operand" "f")))]
7265+ "TARGET_HARD_FLOAT && TARGET_FDIV"
7266+ "fdiv.<fmt>\t%0,%1,%2"
7267+ [(set_attr "type" "fdiv")
7268+ (set_attr "mode" "<UNITMODE>")])
7269+
7270+;;
7271+;; ....................
7272+;;
7273+;; SQUARE ROOT
7274+;;
7275+;; ....................
7276+
7277+(define_insn "sqrt<mode>2"
7278+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7279+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
7280+ "TARGET_HARD_FLOAT && TARGET_FDIV"
7281+{
7282+ return "fsqrt.<fmt>\t%0,%1";
7283+}
7284+ [(set_attr "type" "fsqrt")
7285+ (set_attr "mode" "<UNITMODE>")])
7286+
7287+;; Floating point multiply accumulate instructions.
7288+
7289+(define_insn "fma<mode>4"
7290+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7291+ (fma:ANYF
7292+ (match_operand:ANYF 1 "register_operand" "f")
7293+ (match_operand:ANYF 2 "register_operand" "f")
7294+ (match_operand:ANYF 3 "register_operand" "f")))]
7295+ "TARGET_HARD_FLOAT"
7296+ "fmadd.<fmt>\t%0,%1,%2,%3"
7297+ [(set_attr "type" "fmadd")
7298+ (set_attr "mode" "<UNITMODE>")])
7299+
7300+(define_insn "fms<mode>4"
7301+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7302+ (fma:ANYF
7303+ (match_operand:ANYF 1 "register_operand" "f")
7304+ (match_operand:ANYF 2 "register_operand" "f")
7305+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
7306+ "TARGET_HARD_FLOAT"
7307+ "fmsub.<fmt>\t%0,%1,%2,%3"
7308+ [(set_attr "type" "fmadd")
7309+ (set_attr "mode" "<UNITMODE>")])
7310+
7311+(define_insn "nfma<mode>4"
7312+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7313+ (neg:ANYF
7314+ (fma:ANYF
7315+ (match_operand:ANYF 1 "register_operand" "f")
7316+ (match_operand:ANYF 2 "register_operand" "f")
7317+ (match_operand:ANYF 3 "register_operand" "f"))))]
7318+ "TARGET_HARD_FLOAT"
7319+ "fnmadd.<fmt>\t%0,%1,%2,%3"
7320+ [(set_attr "type" "fmadd")
7321+ (set_attr "mode" "<UNITMODE>")])
7322+
7323+(define_insn "nfms<mode>4"
7324+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7325+ (neg:ANYF
7326+ (fma:ANYF
7327+ (match_operand:ANYF 1 "register_operand" "f")
7328+ (match_operand:ANYF 2 "register_operand" "f")
7329+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
7330+ "TARGET_HARD_FLOAT"
7331+ "fnmsub.<fmt>\t%0,%1,%2,%3"
7332+ [(set_attr "type" "fmadd")
7333+ (set_attr "mode" "<UNITMODE>")])
7334+
7335+;; modulo signed zeros, -(a*b+c) == -a*b-c
7336+(define_insn "*nfma<mode>4_fastmath"
7337+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7338+ (minus:ANYF
7339+ (mult:ANYF
7340+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
7341+ (match_operand:ANYF 2 "register_operand" "f"))
7342+ (match_operand:ANYF 3 "register_operand" "f")))]
7343+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
7344+ "fnmadd.<fmt>\t%0,%1,%2,%3"
7345+ [(set_attr "type" "fmadd")
7346+ (set_attr "mode" "<UNITMODE>")])
7347+
7348+;; modulo signed zeros, -(a*b-c) == c-a*b
7349+(define_insn "*nfms<mode>4_fastmath"
7350+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7351+ (minus:ANYF
7352+ (match_operand:ANYF 3 "register_operand" "f")
7353+ (mult:ANYF
7354+ (match_operand:ANYF 1 "register_operand" "f")
7355+ (match_operand:ANYF 2 "register_operand" "f"))))]
7356+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
7357+ "fnmsub.<fmt>\t%0,%1,%2,%3"
7358+ [(set_attr "type" "fmadd")
7359+ (set_attr "mode" "<UNITMODE>")])
7360+
7361+;;
7362+;; ....................
7363+;;
7364+;; ABSOLUTE VALUE
7365+;;
7366+;; ....................
7367+
7368+(define_insn "abs<mode>2"
7369+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7370+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
7371+ "TARGET_HARD_FLOAT"
7372+ "fabs.<fmt>\t%0,%1"
7373+ [(set_attr "type" "fmove")
7374+ (set_attr "mode" "<UNITMODE>")])
7375+
7376+
7377+;;
7378+;; ....................
7379+;;
7380+;; MIN/MAX
7381+;;
7382+;; ....................
7383+
7384+(define_insn "smin<mode>3"
7385+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7386+ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
7387+ (match_operand:ANYF 2 "register_operand" "f")))]
7388+ "TARGET_HARD_FLOAT"
7389+ "fmin.<fmt>\t%0,%1,%2"
7390+ [(set_attr "type" "fmove")
7391+ (set_attr "mode" "<UNITMODE>")])
7392+
7393+(define_insn "smax<mode>3"
7394+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7395+ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
7396+ (match_operand:ANYF 2 "register_operand" "f")))]
7397+ "TARGET_HARD_FLOAT"
7398+ "fmax.<fmt>\t%0,%1,%2"
7399+ [(set_attr "type" "fmove")
7400+ (set_attr "mode" "<UNITMODE>")])
7401+
7402+
7403+;;
7404+;; ....................
7405+;;
7406+;; NEGATION and ONE'S COMPLEMENT '
7407+;;
7408+;; ....................
7409+
7410+(define_insn "neg<mode>2"
7411+ [(set (match_operand:ANYF 0 "register_operand" "=f")
7412+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
7413+ "TARGET_HARD_FLOAT"
7414+ "fneg.<fmt>\t%0,%1"
7415+ [(set_attr "type" "fmove")
7416+ (set_attr "mode" "<UNITMODE>")])
7417+
7418+(define_insn "one_cmpl<mode>2"
7419+ [(set (match_operand:GPR 0 "register_operand" "=r")
7420+ (not:GPR (match_operand:GPR 1 "register_operand" "r")))]
7421+ ""
7422+ "not\t%0,%1"
7423+ [(set_attr "type" "logical")
7424+ (set_attr "mode" "<MODE>")])
7425+
7426+;;
7427+;; ....................
7428+;;
7429+;; LOGICAL
7430+;;
7431+;; ....................
7432+;;
7433+
7434+(define_insn "and<mode>3"
7435+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
7436+ (and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
7437+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
7438+ ""
7439+ "and\t%0,%1,%2"
7440+ [(set_attr "type" "logical")
7441+ (set_attr "mode" "<MODE>")])
7442+
7443+(define_insn "ior<mode>3"
7444+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
7445+ (ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
7446+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
7447+ ""
7448+ "or\t%0,%1,%2"
7449+ [(set_attr "type" "logical")
7450+ (set_attr "mode" "<MODE>")])
7451+
7452+(define_insn "xor<mode>3"
7453+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
7454+ (xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
7455+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
7456+ ""
7457+ "xor\t%0,%1,%2"
7458+ [(set_attr "type" "logical")
7459+ (set_attr "mode" "<MODE>")])
7460+
7461+;;
7462+;; ....................
7463+;;
7464+;; TRUNCATION
7465+;;
7466+;; ....................
7467+
7468+(define_insn "truncdfsf2"
7469+ [(set (match_operand:SF 0 "register_operand" "=f")
7470+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
7471+ "TARGET_HARD_FLOAT"
7472+ "fcvt.s.d\t%0,%1"
7473+ [(set_attr "type" "fcvt")
7474+ (set_attr "cnv_mode" "D2S")
7475+ (set_attr "mode" "SF")])
7476+
7477+;; Integer truncation patterns. Truncating to HImode/QImode is a no-op.
7478+;; Truncating from DImode to SImode is not, because we always keep SImode
7479+;; values sign-extended in a register so we can safely use DImode branches
7480+;; and comparisons on SImode values.
7481+
7482+(define_insn "truncdisi2"
7483+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
7484+ (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
7485+ "TARGET_64BIT"
7486+ "@
7487+ sext.w\t%0,%1
7488+ sw\t%1,%0"
7489+ [(set_attr "move_type" "arith,store")
7490+ (set_attr "mode" "SI")])
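+;; The register alternative emits SEXT.W (an alias for ADDIW rd,rs,0), which
+;; re-sign-extends bit 31 into the upper half and so re-establishes the
+;; invariant described above.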
7491+
7492+;; Combiner patterns to optimize shift/truncate combinations.
7493+
7494+(define_insn "*ashr_trunc<mode>"
7495+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
7496+ (truncate:SUBDI
7497+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
7498+ (match_operand:DI 2 "const_arith_operand" ""))))]
7499+ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
7500+ "sra\t%0,%1,%2"
7501+ [(set_attr "type" "shift")
7502+ (set_attr "mode" "<MODE>")])
7503+
7504+(define_insn "*lshr32_trunc<mode>"
7505+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
7506+ (truncate:SUBDI
7507+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
7508+ (const_int 32))))]
7509+ "TARGET_64BIT"
7510+ "sra\t%0,%1,32"
7511+ [(set_attr "type" "shift")
7512+ (set_attr "mode" "<MODE>")])
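+;; SRA rather than SRL is used here, presumably because only the truncated
+;; low 32 bits are significant and the arithmetic shift leaves the register
+;; sign-extended, matching how the port keeps narrow values in registers.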
7513+
7514+;;
7515+;; ....................
7516+;;
7517+;; ZERO EXTENSION
7518+;;
7519+;; ....................
7520+
7521+;; Extension insns.
7522+
7523+(define_insn_and_split "zero_extendsidi2"
7524+ [(set (match_operand:DI 0 "register_operand" "=r,r")
7525+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
7526+ "TARGET_64BIT"
7527+ "@
7528+ #
7529+ lwu\t%0,%1"
7530+ "&& reload_completed && REG_P (operands[1])"
7531+ [(set (match_dup 0)
7532+ (ashift:DI (match_dup 1) (const_int 32)))
7533+ (set (match_dup 0)
7534+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
7535+ { operands[1] = gen_lowpart (DImode, operands[1]); }
7536+ [(set_attr "move_type" "shift_shift,load")
7537+ (set_attr "mode" "DI")])
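+;; For the register alternative this amounts to the two-instruction sequence
+;; "slli rd,rs,32; srli rd,rd,32", which clears bits 63..32; the memory
+;; alternative uses LWU, which zero-extends directly.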
7538+
7539+;; Combine is not allowed to convert this insn into a zero_extendsidi2
7540+;; because of TRULY_NOOP_TRUNCATION.
7541+
7542+(define_insn_and_split "*clear_upper32"
7543+ [(set (match_operand:DI 0 "register_operand" "=r,r")
7544+ (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
7545+ (const_int 4294967295)))]
7546+ "TARGET_64BIT"
7547+{
7548+ if (which_alternative == 0)
7549+ return "#";
7550+
7551+ operands[1] = gen_lowpart (SImode, operands[1]);
7552+ return "lwu\t%0,%1";
7553+}
7554+ "&& reload_completed && REG_P (operands[1])"
7555+ [(set (match_dup 0)
7556+ (ashift:DI (match_dup 1) (const_int 32)))
7557+ (set (match_dup 0)
7558+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
7559+ ""
7560+ [(set_attr "move_type" "shift_shift,load")
7561+ (set_attr "mode" "DI")])
7562+
7563+(define_insn_and_split "zero_extendhi<GPR:mode>2"
7564+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
7565+ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
7566+ ""
7567+ "@
7568+ #
7569+ lhu\t%0,%1"
7570+ "&& reload_completed && REG_P (operands[1])"
7571+ [(set (match_dup 0)
7572+ (ashift:GPR (match_dup 1) (match_dup 2)))
7573+ (set (match_dup 0)
7574+ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
7575+ {
7576+ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
7577+ operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
7578+ }
7579+ [(set_attr "move_type" "shift_shift,load")
7580+ (set_attr "mode" "<GPR:MODE>")])
7581+
7582+(define_insn "zero_extendqi<SUPERQI:mode>2"
7583+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
7584+ (zero_extend:SUPERQI
7585+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
7586+ ""
7587+ "@
7588+ and\t%0,%1,0xff
7589+ lbu\t%0,%1"
7590+ [(set_attr "move_type" "andi,load")
7591+ (set_attr "mode" "<SUPERQI:MODE>")])
7592+
7593+;;
7594+;; ....................
7595+;;
7596+;; SIGN EXTENSION
7597+;;
7598+;; ....................
7599+
7600+;; Extension insns.
7601+;; Those with an integer source operand are ordered widest source type first.
7602+
7603+;; When TARGET_64BIT, all SImode integer registers should already be in
7604+;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2). We can
7605+;; therefore turn register->register sign extensions into plain moves, or
7606+;; delete them outright when source and destination coincide (see below).
7607+;;
7608+;; The register alternative has type "arith" so that the pre-reload
7609+;; scheduler will treat it as a move. This reflects what happens if
7610+;; the register alternative needs a reload.
7611+(define_insn_and_split "extendsidi2"
7612+ [(set (match_operand:DI 0 "register_operand" "=r,r")
7613+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
7614+ "TARGET_64BIT"
7615+ "@
7616+ #
7617+ lw\t%0,%1"
7618+ "&& reload_completed && register_operand (operands[1], VOIDmode)"
7619+ [(set (match_dup 0) (match_dup 1))]
7620+{
7621+ if (REGNO (operands[0]) == REGNO (operands[1]))
7622+ {
7623+ emit_note (NOTE_INSN_DELETED);
7624+ DONE;
7625+ }
7626+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
7627+}
7628+ [(set_attr "move_type" "move,load")
7629+ (set_attr "mode" "DI")])
7630+
7631+(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
7632+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
7633+ (sign_extend:SUPERQI
7634+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
7635+ ""
7636+ "@
7637+ #
7638+ l<SHORT:size>\t%0,%1"
7639+ "&& reload_completed && REG_P (operands[1])"
7640+ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
7641+ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
7642+{
7643+ operands[0] = gen_lowpart (SImode, operands[0]);
7644+ operands[1] = gen_lowpart (SImode, operands[1]);
7645+ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
7646+ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
7647+}
7648+ [(set_attr "move_type" "shift_shift,load")
7649+ (set_attr "mode" "SI")])
7650+
7651+(define_insn "extendsfdf2"
7652+ [(set (match_operand:DF 0 "register_operand" "=f")
7653+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
7654+ "TARGET_HARD_FLOAT"
7655+ "fcvt.d.s\t%0,%1"
7656+ [(set_attr "type" "fcvt")
7657+ (set_attr "cnv_mode" "S2D")
7658+ (set_attr "mode" "DF")])
7659+
7660+;;
7661+;; ....................
7662+;;
7663+;; CONVERSIONS
7664+;;
7665+;; ....................
7666+
7667+(define_insn "fix_truncdfsi2"
7668+ [(set (match_operand:SI 0 "register_operand" "=r")
7669+ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
7670+ "TARGET_HARD_FLOAT"
7671+ "fcvt.w.d %0,%1,rtz"
7672+ [(set_attr "type" "fcvt")
7673+ (set_attr "mode" "DF")
7674+ (set_attr "cnv_mode" "D2I")])
7675+
7676+
7677+(define_insn "fix_truncsfsi2"
7678+ [(set (match_operand:SI 0 "register_operand" "=r")
7679+ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
7680+ "TARGET_HARD_FLOAT"
7681+ "fcvt.w.s %0,%1,rtz"
7682+ [(set_attr "type" "fcvt")
7683+ (set_attr "mode" "SF")
7684+ (set_attr "cnv_mode" "S2I")])
7685+
7686+
7687+(define_insn "fix_truncdfdi2"
7688+ [(set (match_operand:DI 0 "register_operand" "=r")
7689+ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
7690+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7691+ "fcvt.l.d %0,%1,rtz"
7692+ [(set_attr "type" "fcvt")
7693+ (set_attr "mode" "DF")
7694+ (set_attr "cnv_mode" "D2I")])
7695+
7696+
7697+(define_insn "fix_truncsfdi2"
7698+ [(set (match_operand:DI 0 "register_operand" "=r")
7699+ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
7700+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7701+ "fcvt.l.s %0,%1,rtz"
7702+ [(set_attr "type" "fcvt")
7703+ (set_attr "mode" "SF")
7704+ (set_attr "cnv_mode" "S2I")])
7705+
7706+
7707+(define_insn "floatsidf2"
7708+ [(set (match_operand:DF 0 "register_operand" "=f")
7709+ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
7710+ "TARGET_HARD_FLOAT"
7711+ "fcvt.d.w\t%0,%z1"
7712+ [(set_attr "type" "fcvt")
7713+ (set_attr "mode" "DF")
7714+ (set_attr "cnv_mode" "I2D")])
7715+
7716+
7717+(define_insn "floatdidf2"
7718+ [(set (match_operand:DF 0 "register_operand" "=f")
7719+ (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
7720+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7721+ "fcvt.d.l\t%0,%z1"
7722+ [(set_attr "type" "fcvt")
7723+ (set_attr "mode" "DF")
7724+ (set_attr "cnv_mode" "I2D")])
7725+
7726+
7727+(define_insn "floatsisf2"
7728+ [(set (match_operand:SF 0 "register_operand" "=f")
7729+ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
7730+ "TARGET_HARD_FLOAT"
7731+ "fcvt.s.w\t%0,%z1"
7732+ [(set_attr "type" "fcvt")
7733+ (set_attr "mode" "SF")
7734+ (set_attr "cnv_mode" "I2S")])
7735+
7736+
7737+(define_insn "floatdisf2"
7738+ [(set (match_operand:SF 0 "register_operand" "=f")
7739+ (float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
7740+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7741+ "fcvt.s.l\t%0,%z1"
7742+ [(set_attr "type" "fcvt")
7743+ (set_attr "mode" "SF")
7744+ (set_attr "cnv_mode" "I2S")])
7745+
7746+
7747+(define_insn "floatunssidf2"
7748+ [(set (match_operand:DF 0 "register_operand" "=f")
7749+ (unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
7750+ "TARGET_HARD_FLOAT"
7751+ "fcvt.d.wu\t%0,%z1"
7752+ [(set_attr "type" "fcvt")
7753+ (set_attr "mode" "DF")
7754+ (set_attr "cnv_mode" "I2D")])
7755+
7756+
7757+(define_insn "floatunsdidf2"
7758+ [(set (match_operand:DF 0 "register_operand" "=f")
7759+ (unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
7760+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7761+ "fcvt.d.lu\t%0,%z1"
7762+ [(set_attr "type" "fcvt")
7763+ (set_attr "mode" "DF")
7764+ (set_attr "cnv_mode" "I2D")])
7765+
7766+
7767+(define_insn "floatunssisf2"
7768+ [(set (match_operand:SF 0 "register_operand" "=f")
7769+ (unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
7770+ "TARGET_HARD_FLOAT"
7771+ "fcvt.s.wu\t%0,%z1"
7772+ [(set_attr "type" "fcvt")
7773+ (set_attr "mode" "SF")
7774+ (set_attr "cnv_mode" "I2S")])
7775+
7776+
7777+(define_insn "floatunsdisf2"
7778+ [(set (match_operand:SF 0 "register_operand" "=f")
7779+ (unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
7780+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7781+ "fcvt.s.lu\t%0,%z1"
7782+ [(set_attr "type" "fcvt")
7783+ (set_attr "mode" "SF")
7784+ (set_attr "cnv_mode" "I2S")])
7785+
7786+
7787+(define_insn "fixuns_truncdfsi2"
7788+ [(set (match_operand:SI 0 "register_operand" "=r")
7789+ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
7790+ "TARGET_HARD_FLOAT"
7791+ "fcvt.wu.d %0,%1,rtz"
7792+ [(set_attr "type" "fcvt")
7793+ (set_attr "mode" "DF")
7794+ (set_attr "cnv_mode" "D2I")])
7795+
7796+
7797+(define_insn "fixuns_truncsfsi2"
7798+ [(set (match_operand:SI 0 "register_operand" "=r")
7799+ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
7800+ "TARGET_HARD_FLOAT"
7801+ "fcvt.wu.s %0,%1,rtz"
7802+ [(set_attr "type" "fcvt")
7803+ (set_attr "mode" "SF")
7804+ (set_attr "cnv_mode" "S2I")])
7805+
7806+
7807+(define_insn "fixuns_truncdfdi2"
7808+ [(set (match_operand:DI 0 "register_operand" "=r")
7809+ (unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
7810+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7811+ "fcvt.lu.d %0,%1,rtz"
7812+ [(set_attr "type" "fcvt")
7813+ (set_attr "mode" "DF")
7814+ (set_attr "cnv_mode" "D2I")])
7815+
7816+
7817+(define_insn "fixuns_truncsfdi2"
7818+ [(set (match_operand:DI 0 "register_operand" "=r")
7819+ (unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
7820+ "TARGET_HARD_FLOAT && TARGET_64BIT"
7821+ "fcvt.lu.s %0,%1,rtz"
7822+ [(set_attr "type" "fcvt")
7823+ (set_attr "mode" "SF")
7824+ (set_attr "cnv_mode" "S2I")])
7825+
7826+;;
7827+;; ....................
7828+;;
7829+;; DATA MOVEMENT
7830+;;
7831+;; ....................
7832+
7833+;; Lower-level instructions for loading an address from the GOT.
7834+;; We could use MEMs, but an unspec gives more optimization
7835+;; opportunities.
7836+
7837+(define_insn "got_load<mode>"
7838+ [(set (match_operand:P 0 "register_operand" "=r")
7839+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
7840+ UNSPEC_LOAD_GOT))]
7841+ "flag_pic"
7842+ "la\t%0,%1"
7843+ [(set_attr "got" "load")
7844+ (set_attr "mode" "<MODE>")])
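+;; Under -fpic the "la" pseudo is expected to expand to a two-instruction
+;; AUIPC+load sequence that fetches the address from the GOT, which is why
+;; the length attribute above counts got "load" insns as 8 bytes.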
7845+
7846+(define_insn "tls_add_tp_le<mode>"
7847+ [(set (match_operand:P 0 "register_operand" "=r")
7848+ (unspec:P [(match_operand:P 1 "register_operand" "r")
7849+ (match_operand:P 2 "register_operand" "r")
7850+ (match_operand:P 3 "symbolic_operand" "")]
7851+ UNSPEC_TLS_LE))]
7852+ "!flag_pic || flag_pie"
7853+ "add\t%0,%1,%2,%%tprel_add(%3)"
7854+ [(set_attr "type" "arith")
7855+ (set_attr "mode" "<MODE>")])
7856+
7857+(define_insn "got_load_tls_gd<mode>"
7858+ [(set (match_operand:P 0 "register_operand" "=r")
7859+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
7860+ UNSPEC_TLS_GD))]
7861+ "flag_pic"
7862+ "la.tls.gd\t%0,%1"
7863+ [(set_attr "got" "load")
7864+ (set_attr "mode" "<MODE>")])
7865+
7866+(define_insn "got_load_tls_ie<mode>"
7867+ [(set (match_operand:P 0 "register_operand" "=r")
7868+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
7869+ UNSPEC_TLS_IE))]
7870+ "flag_pic"
7871+ "la.tls.ie\t%0,%1"
7872+ [(set_attr "got" "load")
7873+ (set_attr "mode" "<MODE>")])
7874+
7875+;; Instructions for adding the low 12 bits of an address to a register.
7876+;; Operand 2 is the address: riscv_print_operand works out which relocation
7877+;; should be applied.
7878+
7879+(define_insn "*low<mode>"
7880+ [(set (match_operand:P 0 "register_operand" "=r")
7881+ (lo_sum:P (match_operand:P 1 "register_operand" "r")
7882+ (match_operand:P 2 "immediate_operand" "")))]
7883+ ""
7884+ "add\t%0,%1,%R2"
7885+ [(set_attr "alu_type" "add")
7886+ (set_attr "mode" "<MODE>")])
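+;; For illustration: a symbolic address is typically materialized by an LUI of
+;; its %hi part followed by the %lo add above, the two relocations together
+;; reconstructing the full offset.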
7887+
7888+;; Allow combine to split complex const_int load sequences, using operand 2
7889+;; to store the intermediate results. See move_operand for details.
7890+(define_split
7891+ [(set (match_operand:GPR 0 "register_operand")
7892+ (match_operand:GPR 1 "splittable_const_int_operand"))
7893+ (clobber (match_operand:GPR 2 "register_operand"))]
7894+ ""
7895+ [(const_int 0)]
7896+{
7897+ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
7898+ DONE;
7899+})
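+;; As a rough sketch, riscv_move_integer can build a constant such as
+;; 0x12345678 as an LUI of the upper bits followed by an addition of the
+;; signed low 12 bits, using operand 2 for any intermediate value.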
7900+
7901+;; Likewise, for symbolic operands.
7902+(define_split
7903+ [(set (match_operand:P 0 "register_operand")
7904+ (match_operand:P 1))
7905+ (clobber (match_operand:P 2 "register_operand"))]
7906+ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
7907+ [(set (match_dup 0) (match_dup 3))]
7908+{
7909+ riscv_split_symbol (operands[2], operands[1],
7910+ MAX_MACHINE_MODE, &operands[3]);
7911+})
7912+
7913+;; 64-bit integer moves
7914+
7915+;; Unlike most other insns, the move insns can't be split with
7916+;; different predicates, because register spilling and other parts of
7917+;; the compiler have memoized the insn number already.
7918+
7919+(define_expand "movdi"
7920+ [(set (match_operand:DI 0 "")
7921+ (match_operand:DI 1 ""))]
7922+ ""
7923+{
7924+ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
7925+ DONE;
7926+})
7927+
7928+(define_insn "*movdi_32bit"
7929+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
7930+ (match_operand:DI 1 "move_operand" "r,i,m,r,*J*r,*m,*f,*f"))]
7931+ "!TARGET_64BIT
7932+ && (register_operand (operands[0], DImode)
7933+ || reg_or_0_operand (operands[1], DImode))"
7934+ { return riscv_output_move (operands[0], operands[1]); }
7935+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
7936+ (set_attr "mode" "DI")])
7937+
7938+(define_insn "*movdi_64bit"
7939+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
7940+ (match_operand:DI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
7941+ "TARGET_64BIT
7942+ && (register_operand (operands[0], DImode)
7943+ || reg_or_0_operand (operands[1], DImode))"
7944+ { return riscv_output_move (operands[0], operands[1]); }
7945+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
7946+ (set_attr "mode" "DI")])
7947+
7948+;; 32-bit Integer moves
7949+
7950+;; Unlike most other insns, the move insns can't be split with
7951+;; different predicates, because register spilling and other parts of
7952+;; the compiler have memoized the insn number already.
7953+
7954+(define_expand "mov<mode>"
7955+ [(set (match_operand:IMOVE32 0 "")
7956+ (match_operand:IMOVE32 1 ""))]
7957+ ""
7958+{
7959+ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
7960+ DONE;
7961+})
7962+
7963+;; The pattern below also provides disparaged ("*") alternatives for holding
7964+;; integer values in FP registers.
7965+
7966+(define_insn "*mov<mode>_internal"
7967+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
7968+ (match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
7969+ "(register_operand (operands[0], <MODE>mode)
7970+ || reg_or_0_operand (operands[1], <MODE>mode))"
7971+ { return riscv_output_move (operands[0], operands[1]); }
7972+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
7973+ (set_attr "mode" "SI")])
7974+
7975+;; 16-bit Integer moves
7976+
7977+;; Unlike most other insns, the move insns can't be split with
7978+;; different predicates, because register spilling and other parts of
7979+;; the compiler have memoized the insn number already.
7980+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
7981+
7982+(define_expand "movhi"
7983+ [(set (match_operand:HI 0 "")
7984+ (match_operand:HI 1 ""))]
7985+ ""
7986+{
7987+ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
7988+ DONE;
7989+})
7990+
7991+(define_insn "*movhi_internal"
7992+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
7993+ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
7994+ "(register_operand (operands[0], HImode)
7995+ || reg_or_0_operand (operands[1], HImode))"
7996+ { return riscv_output_move (operands[0], operands[1]); }
7997+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
7998+ (set_attr "mode" "HI")])
7999+
8000+;; HImode constant generation; see riscv_move_integer for details.
8001+;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
8002+
8003+(define_insn "add<mode>hi3"
8004+ [(set (match_operand:HI 0 "register_operand" "=r,r")
8005+ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
8006+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
8007+ ""
8008+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
8009+ [(set_attr "type" "arith")
8010+ (set_attr "mode" "HI")])
8011+
8012+(define_insn "xor<mode>hi3"
8013+ [(set (match_operand:HI 0 "register_operand" "=r,r")
8014+ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
8015+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
8016+ ""
8017+ "xor\t%0,%1,%2"
8018+ [(set_attr "type" "logical")
8019+ (set_attr "mode" "HI")])
8020+
8021+;; 8-bit Integer moves
8022+
8023+(define_expand "movqi"
8024+ [(set (match_operand:QI 0 "")
8025+ (match_operand:QI 1 ""))]
8026+ ""
8027+{
8028+ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
8029+ DONE;
8030+})
8031+
8032+(define_insn "*movqi_internal"
8033+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
8034+ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
8035+ "(register_operand (operands[0], QImode)
8036+ || reg_or_0_operand (operands[1], QImode))"
8037+ { return riscv_output_move (operands[0], operands[1]); }
8038+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
8039+ (set_attr "mode" "QI")])
8040+
8041+;; 32-bit floating point moves
8042+
8043+(define_expand "movsf"
8044+ [(set (match_operand:SF 0 "")
8045+ (match_operand:SF 1 ""))]
8046+ ""
8047+{
8048+ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
8049+ DONE;
8050+})
8051+
8052+(define_insn "*movsf_hardfloat"
8053+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
8054+ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
8055+ "TARGET_HARD_FLOAT
8056+ && (register_operand (operands[0], SFmode)
8057+ || reg_or_0_operand (operands[1], SFmode))"
8058+ { return riscv_output_move (operands[0], operands[1]); }
8059+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
8060+ (set_attr "mode" "SF")])
8061+
8062+(define_insn "*movsf_softfloat"
8063+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
8064+ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
8065+ "TARGET_SOFT_FLOAT
8066+ && (register_operand (operands[0], SFmode)
8067+ || reg_or_0_operand (operands[1], SFmode))"
8068+ { return riscv_output_move (operands[0], operands[1]); }
8069+ [(set_attr "move_type" "move,load,store")
8070+ (set_attr "mode" "SF")])
8071+
8072+;; 64-bit floating point moves
8073+
8074+(define_expand "movdf"
8075+ [(set (match_operand:DF 0 "")
8076+ (match_operand:DF 1 ""))]
8077+ ""
8078+{
8079+ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
8080+ DONE;
8081+})
8082+
8083+;; RV32 has no direct 64-bit GPR<->FPR move instructions, so go through
8084+;; memory instead (except for moving a constant 0 to an FPR, via fcvt.d.w).
8085+(define_insn "*movdf_hardfloat_rv32"
8086+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
8087+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
8088+ "!TARGET_64BIT && TARGET_HARD_FLOAT
8089+ && (register_operand (operands[0], DFmode)
8090+ || reg_or_0_operand (operands[1], DFmode))"
8091+ { return riscv_output_move (operands[0], operands[1]); }
8092+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
8093+ (set_attr "mode" "DF")])
8094+
8095+(define_insn "*movdf_hardfloat_rv64"
8096+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
8097+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
8098+ "TARGET_64BIT && TARGET_HARD_FLOAT
8099+ && (register_operand (operands[0], DFmode)
8100+ || reg_or_0_operand (operands[1], DFmode))"
8101+ { return riscv_output_move (operands[0], operands[1]); }
8102+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
8103+ (set_attr "mode" "DF")])
8104+
8105+(define_insn "*movdf_softfloat"
8106+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
8107+ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
8108+ "TARGET_SOFT_FLOAT
8109+ && (register_operand (operands[0], DFmode)
8110+ || reg_or_0_operand (operands[1], DFmode))"
8111+ { return riscv_output_move (operands[0], operands[1]); }
8112+ [(set_attr "move_type" "move,load,store")
8113+ (set_attr "mode" "DF")])
8114+
8115+;; 128-bit integer moves
8116+
8117+(define_expand "movti"
8118+ [(set (match_operand:TI 0)
8119+ (match_operand:TI 1))]
8120+ "TARGET_64BIT"
8121+{
8122+ if (riscv_legitimize_move (TImode, operands[0], operands[1]))
8123+ DONE;
8124+})
8125+
8126+(define_insn "*movti"
8127+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
8128+ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
8129+ "TARGET_64BIT
8130+ && (register_operand (operands[0], TImode)
8131+ || reg_or_0_operand (operands[1], TImode))"
8132+ "#"
8133+ [(set_attr "move_type" "move,const,load,store")
8134+ (set_attr "mode" "TI")])
8135+
8136+(define_split
8137+ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
8138+ (match_operand:MOVE64 1 "move_operand"))]
8139+ "reload_completed && !TARGET_64BIT
8140+ && riscv_split_64bit_move_p (operands[0], operands[1])"
8141+ [(const_int 0)]
8142+{
8143+ riscv_split_doubleword_move (operands[0], operands[1]);
8144+ DONE;
8145+})
8146+
8147+(define_split
8148+ [(set (match_operand:MOVE128 0 "nonimmediate_operand")
8149+ (match_operand:MOVE128 1 "move_operand"))]
8150+ "TARGET_64BIT && reload_completed"
8151+ [(const_int 0)]
8152+{
8153+ riscv_split_doubleword_move (operands[0], operands[1]);
8154+ DONE;
8155+})
8156+
8157+;; Split floating-point moves: load or store one word of a SPLITF value.
8158+
8159+;; Load the low word of operand 0 with operand 1.
8160+(define_insn "load_low<mode>"
8161+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
8162+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
8163+ UNSPEC_LOAD_LOW))]
8164+ "TARGET_HARD_FLOAT"
8165+{
8166+ operands[0] = riscv_subword (operands[0], 0);
8167+ return riscv_output_move (operands[0], operands[1]);
8168+}
8169+ [(set_attr "move_type" "mtc,fpload")
8170+ (set_attr "mode" "<HALFMODE>")])
8171+
8172+;; Load the high word of operand 0 from operand 1, preserving the value
8173+;; in the low word.
8174+(define_insn "load_high<mode>"
8175+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
8176+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
8177+ (match_operand:SPLITF 2 "register_operand" "0,0")]
8178+ UNSPEC_LOAD_HIGH))]
8179+ "TARGET_HARD_FLOAT"
8180+{
8181+ operands[0] = riscv_subword (operands[0], 1);
8182+ return riscv_output_move (operands[0], operands[1]);
8183+}
8184+ [(set_attr "move_type" "mtc,fpload")
8185+ (set_attr "mode" "<HALFMODE>")])
8186+
8187+;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
8188+;; high word and 0 to store the low word.
8189+(define_insn "store_word<mode>"
8190+ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
8191+ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
8192+ (match_operand 2 "const_int_operand")]
8193+ UNSPEC_STORE_WORD))]
8194+ "TARGET_HARD_FLOAT"
8195+{
8196+ operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
8197+ return riscv_output_move (operands[0], operands[1]);
8198+}
8199+ [(set_attr "move_type" "mfc,fpstore")
8200+ (set_attr "mode" "<HALFMODE>")])
8201+
8202+;; Expand in-line code to clear the instruction cache between operand[0] and
8203+;; operand[1].
8204+(define_expand "clear_cache"
8205+ [(match_operand 0 "pmode_register_operand")
8206+ (match_operand 1 "pmode_register_operand")]
8207+ ""
8208+ "
8209+{
8210+ emit_insn(gen_fence_i());
8211+ DONE;
8212+}")
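+;; FENCE.I synchronizes this hart's instruction fetches with prior data
+;; stores, which is why the clear_cache expansion above simply emits it.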
8213+
8214+(define_insn "fence"
8215+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
8216+ ""
8217+ "%|fence%-")
8218+
8219+(define_insn "fence_i"
8220+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
8221+ ""
8222+ "fence.i")
8223+
8224+;; Block moves; see riscv.c for more details.
8225+;; Argument 0 is the destination
8226+;; Argument 1 is the source
8227+;; Argument 2 is the length
8228+;; Argument 3 is the alignment
8229+
8230+(define_expand "movmemsi"
8231+ [(parallel [(set (match_operand:BLK 0 "general_operand")
8232+ (match_operand:BLK 1 "general_operand"))
8233+ (use (match_operand:SI 2 ""))
8234+ (use (match_operand:SI 3 "const_int_operand"))])]
8235+ "!TARGET_MEMCPY"
8236+{
8237+ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
8238+ DONE;
8239+ else
8240+ FAIL;
8241+})
8242+
8243+;;
8244+;; ....................
8245+;;
8246+;; SHIFTS
8247+;;
8248+;; ....................
8249+
8250+(define_insn "<optab>si3"
8251+ [(set (match_operand:SI 0 "register_operand" "=r")
8252+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
8253+ (match_operand:SI 2 "arith_operand" "rI")))]
8254+ ""
8255+{
8256+ if (GET_CODE (operands[2]) == CONST_INT)
8257+ operands[2] = GEN_INT (INTVAL (operands[2])
8258+ & (GET_MODE_BITSIZE (SImode) - 1));
8259+
8260+ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
8261+}
8262+ [(set_attr "type" "shift")
8263+ (set_attr "mode" "SI")])
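+;; The RV64 *W shift instructions use only the low 5 bits of the shift amount
+;; and sign-extend their 32-bit result, so masking the constant here keeps the
+;; emitted SLLW/SRLW/SRAW equivalent to the SImode shift.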
8264+
8265+(define_insn "*<optab>disi3"
8266+ [(set (match_operand:SI 0 "register_operand" "=r")
8267+ (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
8268+ (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
8269+ "TARGET_64BIT"
8270+ "<insn>w\t%0,%1,%2"
8271+ [(set_attr "type" "shift")
8272+ (set_attr "mode" "SI")])
8273+
8274+(define_insn "*ashldi3_truncsi"
8275+ [(set (match_operand:SI 0 "register_operand" "=r")
8276+ (truncate:SI
8277+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
8278+ (match_operand:DI 2 "const_arith_operand" "I"))))]
8279+ "TARGET_64BIT && INTVAL (operands[2]) < 32"
8280+ "sllw\t%0,%1,%2"
8281+ [(set_attr "type" "shift")
8282+ (set_attr "mode" "SI")])
8283+
8284+(define_insn "*ashldisi3"
8285+ [(set (match_operand:SI 0 "register_operand" "=r")
8286+ (ashift:SI (match_operand:GPR 1 "register_operand" "r")
8287+ (match_operand:GPR2 2 "arith_operand" "rI")))]
8288+ "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
8289+ "sllw\t%0,%1,%2"
8290+ [(set_attr "type" "shift")
8291+ (set_attr "mode" "SI")])
8292+
8293+(define_insn "<optab>di3"
8294+ [(set (match_operand:DI 0 "register_operand" "=r")
8295+ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
8296+ (match_operand:DI 2 "arith_operand" "rI")))]
8297+ "TARGET_64BIT"
8298+{
8299+ if (GET_CODE (operands[2]) == CONST_INT)
8300+ operands[2] = GEN_INT (INTVAL (operands[2])
8301+ & (GET_MODE_BITSIZE (DImode) - 1));
8302+
8303+ return "<insn>\t%0,%1,%2";
8304+}
8305+ [(set_attr "type" "shift")
8306+ (set_attr "mode" "DI")])
8307+
8308+(define_insn "<optab>si3_extend"
8309+ [(set (match_operand:DI 0 "register_operand" "=r")
8310+ (sign_extend:DI
8311+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
8312+ (match_operand:SI 2 "arith_operand" "rI"))))]
8313+ "TARGET_64BIT"
8314+{
8315+ if (GET_CODE (operands[2]) == CONST_INT)
8316+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
8317+
8318+ return "<insn>w\t%0,%1,%2";
8319+}
8320+ [(set_attr "type" "shift")
8321+ (set_attr "mode" "SI")])
8322+
8323+;;
8324+;; ....................
8325+;;
8326+;; CONDITIONAL BRANCHES
8327+;;
8328+;; ....................
8329+
8330+;; Conditional branches
8331+
8332+(define_insn "*branch_order<mode>"
8333+ [(set (pc)
8334+ (if_then_else
8335+ (match_operator 1 "order_operator"
8336+ [(match_operand:GPR 2 "register_operand" "r")
8337+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
8338+ (label_ref (match_operand 0 "" ""))
8339+ (pc)))]
8340+ ""
8341+ "b%C1\t%2,%z3,%0"
8342+ [(set_attr "type" "branch")
8343+ (set_attr "mode" "none")])
8344+
8345+;; Used to implement built-in functions.
8346+(define_expand "condjump"
8347+ [(set (pc)
8348+ (if_then_else (match_operand 0)
8349+ (label_ref (match_operand 1))
8350+ (pc)))])
8351+
8352+(define_expand "cbranch<mode>4"
8353+ [(set (pc)
8354+ (if_then_else (match_operator 0 "comparison_operator"
8355+ [(match_operand:GPR 1 "register_operand")
8356+ (match_operand:GPR 2 "nonmemory_operand")])
8357+ (label_ref (match_operand 3 ""))
8358+ (pc)))]
8359+ ""
8360+{
8361+ riscv_expand_conditional_branch (operands);
8362+ DONE;
8363+})
8364+
8365+(define_expand "cbranch<mode>4"
8366+ [(set (pc)
8367+ (if_then_else (match_operator 0 "comparison_operator"
8368+ [(match_operand:SCALARF 1 "register_operand")
8369+ (match_operand:SCALARF 2 "register_operand")])
8370+ (label_ref (match_operand 3 ""))
8371+ (pc)))]
8372+ ""
8373+{
8374+ riscv_expand_conditional_branch (operands);
8375+ DONE;
8376+})
8377+
8378+(define_insn_and_split "*branch_on_bit<GPR:mode>"
8379+ [(set (pc)
8380+ (if_then_else
8381+ (match_operator 0 "equality_operator"
8382+ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
8383+ (const_int 1)
8384+ (match_operand 3 "const_int_operand"))
8385+ (const_int 0)])
8386+ (label_ref (match_operand 1))
8387+ (pc)))
8388+ (clobber (match_scratch:GPR 4 "=&r"))]
8389+ ""
8390+ "#"
8391+ "reload_completed"
8392+ [(set (match_dup 4)
8393+ (ashift:GPR (match_dup 2) (match_dup 3)))
8394+ (set (pc)
8395+ (if_then_else
8396+ (match_op_dup 0 [(match_dup 4) (const_int 0)])
8397+ (label_ref (match_operand 1))
8398+ (pc)))]
8399+{
8400+ int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
8401+ operands[3] = GEN_INT (shift);
8402+
8403+ if (GET_CODE (operands[0]) == EQ)
8404+ operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
8405+ else
8406+ operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
8407+})
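;; Worked example for the *branch_on_bit split above, assuming DImode: testing
;; "(zero_extract x 1 3) == 0" shifts x left by 64 - 1 - 3 = 60 into the
;; scratch register, which moves bit 3 into the sign position, and the EQ test
;; is rewritten as a GE-against-zero branch (NE would become LT).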
8408+
8409+(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
8410+ [(set (pc)
8411+ (if_then_else
8412+ (match_operator 0 "equality_operator"
8413+ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
8414+ (match_operand 3 "const_int_operand")
8415+ (const_int 0))
8416+ (const_int 0)])
8417+ (label_ref (match_operand 1))
8418+ (pc)))
8419+ (clobber (match_scratch:GPR 4 "=&r"))]
8420+ ""
8421+ "#"
8422+ "reload_completed"
8423+ [(set (match_dup 4)
8424+ (ashift:GPR (match_dup 2) (match_dup 3)))
8425+ (set (pc)
8426+ (if_then_else
8427+ (match_op_dup 0 [(match_dup 4) (const_int 0)])
8428+ (label_ref (match_operand 1))
8429+ (pc)))]
8430+{
8431+ operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
8432+})
8433+
8434+;;
8435+;; ....................
8436+;;
8437+;; SETTING A REGISTER FROM A COMPARISON
8438+;;
8439+;; ....................
8440+
8441+;; Destination is always set in SI mode.
8442+
8443+(define_expand "cstore<mode>4"
8444+ [(set (match_operand:SI 0 "register_operand")
8445+ (match_operator:SI 1 "order_operator"
8446+ [(match_operand:GPR 2 "register_operand")
8447+ (match_operand:GPR 3 "nonmemory_operand")]))]
8448+ ""
8449+{
8450+ riscv_expand_scc (operands);
8451+ DONE;
8452+})
8453+
8454+(define_insn "cstore<mode>4"
8455+ [(set (match_operand:SI 0 "register_operand" "=r")
8456+ (match_operator:SI 1 "fp_order_operator"
8457+ [(match_operand:SCALARF 2 "register_operand" "f")
8458+ (match_operand:SCALARF 3 "register_operand" "f")]))]
8459+ "TARGET_HARD_FLOAT"
8460+ "f%C1.<fmt>\t%0,%2,%3"
8461+ [(set_attr "type" "fcmp")
8462+ (set_attr "mode" "<UNITMODE>")])
8463+
8464+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
8465+ [(set (match_operand:GPR2 0 "register_operand" "=r")
8466+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
8467+ (const_int 0)))]
8468+ ""
8469+ "seqz\t%0,%1"
8470+ [(set_attr "type" "slt")
8471+ (set_attr "mode" "<GPR:MODE>")])
8472+
8473+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
8474+ [(set (match_operand:GPR2 0 "register_operand" "=r")
8475+ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
8476+ (const_int 0)))]
8477+ ""
8478+ "snez\t%0,%1"
8479+ [(set_attr "type" "slt")
8480+ (set_attr "mode" "<GPR:MODE>")])
8481+
8482+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
8483+ [(set (match_operand:GPR2 0 "register_operand" "=r")
8484+ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
8485+ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
8486+ ""
8487+ "slt<u>\t%0,%z2,%1"
8488+ [(set_attr "type" "slt")
8489+ (set_attr "mode" "<GPR:MODE>")])
8490+
8491+(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
8492+ [(set (match_operand:GPR2 0 "register_operand" "=r")
8493+ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
8494+ (const_int 1)))]
8495+ ""
8496+ "slt<u>\t%0,zero,%1"
8497+ [(set_attr "type" "slt")
8498+ (set_attr "mode" "<GPR:MODE>")])
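;; Note on *sge<u> above: "x >= 1" (signed or unsigned) is emitted as the
;; equivalent "0 < x", i.e. slt<u> dest,zero,x.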
8499+
8500+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
8501+ [(set (match_operand:GPR2 0 "register_operand" "=r")
8502+ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
8503+ (match_operand:GPR 2 "arith_operand" "rI")))]
8504+ ""
8505+ "slt<u>\t%0,%1,%2"
8506+ [(set_attr "type" "slt")
8507+ (set_attr "mode" "<GPR:MODE>")])
8508+
8509+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
8510+ [(set (match_operand:GPR2 0 "register_operand" "=r")
8511+ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
8512+ (match_operand:GPR 2 "sle_operand" "")))]
8513+ ""
8514+{
8515+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
8516+ return "slt<u>\t%0,%1,%2";
8517+}
8518+ [(set_attr "type" "slt")
8519+ (set_attr "mode" "<GPR:MODE>")])
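;; Example for *sle<u> above: "x <= 7" is rewritten as "x < 8" by incrementing
;; the constant, so it reuses the slt<u> template; the sle_operand predicate is
;; presumably what guarantees the incremented constant still fits.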
8520+
8521+;;
8522+;; ....................
8523+;;
8524+;; UNCONDITIONAL BRANCHES
8525+;;
8526+;; ....................
8527+
8528+;; Unconditional branches.
8529+
8530+(define_insn "jump"
8531+ [(set (pc)
8532+ (label_ref (match_operand 0 "" "")))]
8533+ ""
8534+ "j\t%l0"
8535+ [(set_attr "type" "jump")
8536+ (set_attr "mode" "none")])
8537+
8538+(define_expand "indirect_jump"
8539+ [(set (pc) (match_operand 0 "register_operand"))]
8540+ ""
8541+{
8542+ operands[0] = force_reg (Pmode, operands[0]);
8543+ if (Pmode == SImode)
8544+ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
8545+ else
8546+ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
8547+ DONE;
8548+})
8549+
8550+(define_insn "indirect_jump<mode>"
8551+ [(set (pc) (match_operand:P 0 "register_operand" "r"))]
8552+ ""
8553+ "jr\t%0"
8554+ [(set_attr "type" "jump")
8555+ (set_attr "mode" "none")])
8556+
8557+(define_expand "tablejump"
8558+ [(set (pc) (match_operand 0 "register_operand" ""))
8559+ (use (label_ref (match_operand 1 "" "")))]
8560+ ""
8561+{
8562+ if (flag_pic)
8563+ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
8564+ gen_rtx_LABEL_REF (Pmode, operands[1]),
8565+ NULL_RTX, 0, OPTAB_DIRECT);
8566+
8567+ if (flag_pic && Pmode == DImode)
8568+ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
8569+ else
8570+ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
8571+ DONE;
8572+})
8573+
8574+(define_insn "tablejump<mode>"
8575+ [(set (pc) (match_operand:GPR 0 "register_operand" "r"))
8576+ (use (label_ref (match_operand 1 "" "")))]
8577+ ""
8578+ "jr\t%0"
8579+ [(set_attr "type" "jump")
8580+ (set_attr "mode" "none")])
8581+
8582+;;
8583+;; ....................
8584+;;
8585+;; Function prologue/epilogue
8586+;;
8587+;; ....................
8588+;;
8589+
8590+(define_expand "prologue"
8591+ [(const_int 1)]
8592+ ""
8593+{
8594+ riscv_expand_prologue ();
8595+ DONE;
8596+})
8597+
8598+;; Block any insns from being moved before this point, since the
8599+;; profiling call to mcount can use various registers that aren't
8600+;; saved or used to pass arguments.
8601+
8602+(define_insn "blockage"
8603+ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
8604+ ""
8605+ ""
8606+ [(set_attr "type" "ghost")
8607+ (set_attr "mode" "none")])
8608+
8609+(define_expand "epilogue"
8610+ [(const_int 2)]
8611+ ""
8612+{
8613+ riscv_expand_epilogue (false);
8614+ DONE;
8615+})
8616+
8617+(define_expand "sibcall_epilogue"
8618+ [(const_int 2)]
8619+ ""
8620+{
8621+ riscv_expand_epilogue (true);
8622+ DONE;
8623+})
8624+
8625+;; Trivial return. Make it look like a normal return insn as that
8626+;; allows jump optimizations to work better.
8627+
8628+(define_expand "return"
8629+ [(simple_return)]
8630+ "riscv_can_use_return_insn ()"
8631+ "")
8632+
8633+(define_insn "simple_return"
8634+ [(simple_return)]
8635+ ""
8636+ "ret"
8637+ [(set_attr "type" "jump")
8638+ (set_attr "mode" "none")])
8639+
8640+;; Normal return.
8641+
8642+(define_insn "simple_return_internal"
8643+ [(simple_return)
8644+ (use (match_operand 0 "pmode_register_operand" ""))]
8645+ ""
8646+ "jr\t%0"
8647+ [(set_attr "type" "jump")
8648+ (set_attr "mode" "none")])
8649+
8650+;; This is used in compiling the unwind routines.
8651+(define_expand "eh_return"
8652+ [(use (match_operand 0 "general_operand"))]
8653+ ""
8654+{
8655+ if (GET_MODE (operands[0]) != word_mode)
8656+ operands[0] = convert_to_mode (word_mode, operands[0], 0);
8657+ if (TARGET_64BIT)
8658+ emit_insn (gen_eh_set_lr_di (operands[0]));
8659+ else
8660+ emit_insn (gen_eh_set_lr_si (operands[0]));
8661+ DONE;
8662+})
8663+
8664+;; Clobber the return address on the stack. We can't expand this
8665+;; until we know where it will be put in the stack frame.
8666+
8667+(define_insn "eh_set_lr_si"
8668+ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
8669+ (clobber (match_scratch:SI 1 "=&r"))]
8670+ "! TARGET_64BIT"
8671+ "#")
8672+
8673+(define_insn "eh_set_lr_di"
8674+ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
8675+ (clobber (match_scratch:DI 1 "=&r"))]
8676+ "TARGET_64BIT"
8677+ "#")
8678+
8679+(define_split
8680+ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
8681+ (clobber (match_scratch 1))]
8682+ "reload_completed"
8683+ [(const_int 0)]
8684+{
8685+ riscv_set_return_address (operands[0], operands[1]);
8686+ DONE;
8687+})
8688+
8689+;;
8690+;; ....................
8691+;;
8692+;; FUNCTION CALLS
8693+;;
8694+;; ....................
8695+
8696+;; Sibling calls. All these patterns use jump instructions.
8697+
8698+;; call_insn_operand will only accept constant
8699+;; addresses if a direct jump is acceptable. Since the 'S' constraint
8700+;; is defined in terms of call_insn_operand, the same is true of the
8701+;; constraints.
8702+
8703+;; When we use an indirect jump, we need a register that will be
8704+;; preserved by the epilogue (constraint j).
8705+
8706+(define_expand "sibcall"
8707+ [(parallel [(call (match_operand 0 "")
8708+ (match_operand 1 ""))
8709+ (use (match_operand 2 "")) ;; next_arg_reg
8710+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
8711+ ""
8712+{
8713+ riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
8714+ DONE;
8715+})
8716+
8717+(define_insn "sibcall_internal"
8718+ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
8719+ (match_operand 1 "" ""))]
8720+ "SIBLING_CALL_P (insn)"
8721+ { return REG_P (operands[0]) ? "jr\t%0"
8722+ : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
8723+ : "tail\t%0@"; }
8724+ [(set_attr "type" "call")])
8725+
8726+(define_expand "sibcall_value"
8727+ [(parallel [(set (match_operand 0 "")
8728+ (call (match_operand 1 "")
8729+ (match_operand 2 "")))
8730+ (use (match_operand 3 ""))])] ;; next_arg_reg
8731+ ""
8732+{
8733+ riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
8734+ DONE;
8735+})
8736+
8737+(define_insn "sibcall_value_internal"
8738+ [(set (match_operand 0 "register_operand" "")
8739+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
8740+ (match_operand 2 "" "")))]
8741+ "SIBLING_CALL_P (insn)"
8742+ { return REG_P (operands[1]) ? "jr\t%1"
8743+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
8744+ : "tail\t%1@"; }
8745+ [(set_attr "type" "call")])
8746+
8747+(define_insn "sibcall_value_multiple_internal"
8748+ [(set (match_operand 0 "register_operand" "")
8749+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
8750+ (match_operand 2 "" "")))
8751+ (set (match_operand 3 "register_operand" "")
8752+ (call (mem:SI (match_dup 1))
8753+ (match_dup 2)))
8754+ (clobber (match_scratch:SI 4 "=j,j"))]
8755+ "SIBLING_CALL_P (insn)"
8756+ { return REG_P (operands[1]) ? "jr\t%1"
8757+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
8758+ : "tail\t%1@"; }
8759+ [(set_attr "type" "call")])
8760+
8761+(define_expand "call"
8762+ [(parallel [(call (match_operand 0 "")
8763+ (match_operand 1 ""))
8764+ (use (match_operand 2 "")) ;; next_arg_reg
8765+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
8766+ ""
8767+{
8768+ riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
8769+ DONE;
8770+})
8771+
8772+(define_insn "call_internal"
8773+ [(call (mem:SI (match_operand 0 "call_insn_operand" "r,S"))
8774+ (match_operand 1 "" ""))
8775+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
8776+ ""
8777+ { return REG_P (operands[0]) ? "jalr\t%0"
8778+ : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
8779+ : "call\t%0@"; }
8780+ [(set_attr "jal" "indirect,direct")])
8781+
8782+(define_expand "call_value"
8783+ [(parallel [(set (match_operand 0 "")
8784+ (call (match_operand 1 "")
8785+ (match_operand 2 "")))
8786+ (use (match_operand 3 ""))])] ;; next_arg_reg
8787+ ""
8788+{
8789+ riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
8790+ DONE;
8791+})
8792+
8793+;; See comment for call_internal.
8794+(define_insn "call_value_internal"
8795+ [(set (match_operand 0 "register_operand" "")
8796+ (call (mem:SI (match_operand 1 "call_insn_operand" "r,S"))
8797+ (match_operand 2 "" "")))
8798+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
8799+ ""
8800+ { return REG_P (operands[1]) ? "jalr\t%1"
8801+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
8802+ : "call\t%1@"; }
8803+ [(set_attr "jal" "indirect,direct")])
8804+
8805+;; See comment for call_internal.
8806+(define_insn "call_value_multiple_internal"
8807+ [(set (match_operand 0 "register_operand" "")
8808+ (call (mem:SI (match_operand 1 "call_insn_operand" "r,S"))
8809+ (match_operand 2 "" "")))
8810+ (set (match_operand 3 "register_operand" "")
8811+ (call (mem:SI (match_dup 1))
8812+ (match_dup 2)))
8813+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
8814+ ""
8815+ { return REG_P (operands[1]) ? "jalr\t%1"
8816+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
8817+ : "call\t%1@"; }
8818+ [(set_attr "jal" "indirect,direct")])
8819+
8820+;; Call subroutine returning any type.
8821+
8822+(define_expand "untyped_call"
8823+ [(parallel [(call (match_operand 0 "")
8824+ (const_int 0))
8825+ (match_operand 1 "")
8826+ (match_operand 2 "")])]
8827+ ""
8828+{
8829+ int i;
8830+
8831+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
8832+
8833+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
8834+ {
8835+ rtx set = XVECEXP (operands[2], 0, i);
8836+ riscv_emit_move (SET_DEST (set), SET_SRC (set));
8837+ }
8838+
8839+ emit_insn (gen_blockage ());
8840+ DONE;
8841+})
8842+
8843+(define_insn "nop"
8844+ [(const_int 0)]
8845+ ""
8846+ "nop"
8847+ [(set_attr "type" "nop")
8848+ (set_attr "mode" "none")])
8849+
8850+(define_insn "trap"
8851+ [(trap_if (const_int 1) (const_int 0))]
8852+ ""
8853+ "sbreak")
8854+
8855+(include "sync.md")
8856+(include "peephole.md")
8857diff -urN original-gcc/gcc/config/riscv/riscv-modes.def gcc/gcc/config/riscv/riscv-modes.def
8858--- original-gcc/gcc/config/riscv/riscv-modes.def 1970-01-01 01:00:00.000000000 +0100
8859+++ gcc-4.9.2/gcc/config/riscv/riscv-modes.def 2015-03-07 09:51:45.663139025 +0100
8860@@ -0,0 +1,26 @@
8861+/* Extra machine modes for RISC-V target.
8862+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
8863+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
8864+ Based on MIPS target for GNU compiler.
8865+
8866+This file is part of GCC.
8867+
8868+GCC is free software; you can redistribute it and/or modify
8869+it under the terms of the GNU General Public License as published by
8870+the Free Software Foundation; either version 3, or (at your option)
8871+any later version.
8872+
8873+GCC is distributed in the hope that it will be useful,
8874+but WITHOUT ANY WARRANTY; without even the implied warranty of
8875+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8876+GNU General Public License for more details.
8877+
8878+You should have received a copy of the GNU General Public License
8879+along with GCC; see the file COPYING3. If not see
8880+<http://www.gnu.org/licenses/>. */
8881+
8882+FLOAT_MODE (TF, 16, ieee_quad_format);
8883+
8884+/* Vector modes. */
8885+VECTOR_MODES (INT, 4); /* V8QI V4HI V2SI */
8886+VECTOR_MODES (FLOAT, 4); /* V4HF V2SF */
8887diff -urN original-gcc/gcc/config/riscv/riscv-opc.h gcc/gcc/config/riscv/riscv-opc.h
8888--- original-gcc/gcc/config/riscv/riscv-opc.h 1970-01-01 01:00:00.000000000 +0100
8889+++ gcc-4.9.2/gcc/config/riscv/riscv-opc.h 2015-03-07 09:51:45.663139025 +0100
8890@@ -0,0 +1,1216 @@
8891+/* Automatically generated by parse-opcodes */
8892+#ifndef RISCV_ENCODING_H
8893+#define RISCV_ENCODING_H
8894+#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
8895+#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
8896+#define MATCH_VLSEGSTWU 0xc00305b
8897+#define MASK_VLSEGSTWU 0x1e00707f
8898+#define MATCH_C_LW0 0x12
8899+#define MASK_C_LW0 0x801f
8900+#define MATCH_FMV_D_X 0xf2000053
8901+#define MASK_FMV_D_X 0xfff0707f
8902+#define MATCH_VLH 0x200205b
8903+#define MASK_VLH 0xfff0707f
8904+#define MATCH_C_LI 0x0
8905+#define MASK_C_LI 0x1f
8906+#define MATCH_FADD_D 0x2000053
8907+#define MASK_FADD_D 0xfe00007f
8908+#define MATCH_C_LD 0x9
8909+#define MASK_C_LD 0x1f
8910+#define MATCH_VLD 0x600205b
8911+#define MASK_VLD 0xfff0707f
8912+#define MATCH_FADD_S 0x53
8913+#define MASK_FADD_S 0xfe00007f
8914+#define MATCH_C_LW 0xa
8915+#define MASK_C_LW 0x1f
8916+#define MATCH_VLW 0x400205b
8917+#define MASK_VLW 0xfff0707f
8918+#define MATCH_VSSEGSTW 0x400307b
8919+#define MASK_VSSEGSTW 0x1e00707f
8920+#define MATCH_UTIDX 0x6077
8921+#define MASK_UTIDX 0xfffff07f
8922+#define MATCH_C_FLW 0x14
8923+#define MASK_C_FLW 0x1f
8924+#define MATCH_FSUB_D 0xa000053
8925+#define MASK_FSUB_D 0xfe00007f
8926+#define MATCH_VSSEGSTD 0x600307b
8927+#define MASK_VSSEGSTD 0x1e00707f
8928+#define MATCH_VSSEGSTB 0x307b
8929+#define MASK_VSSEGSTB 0x1e00707f
8930+#define MATCH_DIV 0x2004033
8931+#define MASK_DIV 0xfe00707f
8932+#define MATCH_FMV_H_X 0xf4000053
8933+#define MASK_FMV_H_X 0xfff0707f
8934+#define MATCH_C_FLD 0x15
8935+#define MASK_C_FLD 0x1f
8936+#define MATCH_FRRM 0x202073
8937+#define MASK_FRRM 0xfffff07f
8938+#define MATCH_VFMSV_S 0x1000202b
8939+#define MASK_VFMSV_S 0xfff0707f
8940+#define MATCH_C_LWSP 0x5
8941+#define MASK_C_LWSP 0x1f
8942+#define MATCH_FENCE 0xf
8943+#define MASK_FENCE 0x707f
8944+#define MATCH_FNMSUB_S 0x4b
8945+#define MASK_FNMSUB_S 0x600007f
8946+#define MATCH_FLE_S 0xa0000053
8947+#define MASK_FLE_S 0xfe00707f
8948+#define MATCH_FNMSUB_H 0x400004b
8949+#define MASK_FNMSUB_H 0x600007f
8950+#define MATCH_FLE_H 0xbc000053
8951+#define MASK_FLE_H 0xfe00707f
8952+#define MATCH_FLW 0x2007
8953+#define MASK_FLW 0x707f
8954+#define MATCH_VSETVL 0x600b
8955+#define MASK_VSETVL 0xfff0707f
8956+#define MATCH_VFMSV_D 0x1200202b
8957+#define MASK_VFMSV_D 0xfff0707f
8958+#define MATCH_FLE_D 0xa2000053
8959+#define MASK_FLE_D 0xfe00707f
8960+#define MATCH_FENCE_I 0x100f
8961+#define MASK_FENCE_I 0x707f
8962+#define MATCH_FNMSUB_D 0x200004b
8963+#define MASK_FNMSUB_D 0x600007f
8964+#define MATCH_ADDW 0x3b
8965+#define MASK_ADDW 0xfe00707f
8966+#define MATCH_XOR 0x4033
8967+#define MASK_XOR 0xfe00707f
8968+#define MATCH_SUB 0x40000033
8969+#define MASK_SUB 0xfe00707f
8970+#define MATCH_VSSTW 0x400307b
8971+#define MASK_VSSTW 0xfe00707f
8972+#define MATCH_VSSTH 0x200307b
8973+#define MASK_VSSTH 0xfe00707f
8974+#define MATCH_SC_W 0x1800202f
8975+#define MASK_SC_W 0xf800707f
8976+#define MATCH_VSSTB 0x307b
8977+#define MASK_VSSTB 0xfe00707f
8978+#define MATCH_VSSTD 0x600307b
8979+#define MASK_VSSTD 0xfe00707f
8980+#define MATCH_ADDI 0x13
8981+#define MASK_ADDI 0x707f
8982+#define MATCH_RDTIMEH 0xc8102073
8983+#define MASK_RDTIMEH 0xfffff07f
8984+#define MATCH_MULH 0x2001033
8985+#define MASK_MULH 0xfe00707f
8986+#define MATCH_CSRRSI 0x6073
8987+#define MASK_CSRRSI 0x707f
8988+#define MATCH_FCVT_D_WU 0xd2100053
8989+#define MASK_FCVT_D_WU 0xfff0007f
8990+#define MATCH_MULW 0x200003b
8991+#define MASK_MULW 0xfe00707f
8992+#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
8993+#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
8994+#define MATCH_VENQIMM1 0xc00302b
8995+#define MASK_VENQIMM1 0xfe007fff
8996+#define MATCH_VENQIMM2 0xe00302b
8997+#define MASK_VENQIMM2 0xfe007fff
8998+#define MATCH_RDINSTRET 0xc0202073
8999+#define MASK_RDINSTRET 0xfffff07f
9000+#define MATCH_C_SWSP 0x8
9001+#define MASK_C_SWSP 0x1f
9002+#define MATCH_VLSTW 0x400305b
9003+#define MASK_VLSTW 0xfe00707f
9004+#define MATCH_VLSTH 0x200305b
9005+#define MASK_VLSTH 0xfe00707f
9006+#define MATCH_VLSTB 0x305b
9007+#define MASK_VLSTB 0xfe00707f
9008+#define MATCH_VLSTD 0x600305b
9009+#define MASK_VLSTD 0xfe00707f
9010+#define MATCH_ANDI 0x7013
9011+#define MASK_ANDI 0x707f
9012+#define MATCH_FMV_X_S 0xe0000053
9013+#define MASK_FMV_X_S 0xfff0707f
9014+#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
9015+#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
9016+#define MATCH_FNMADD_S 0x4f
9017+#define MASK_FNMADD_S 0x600007f
9018+#define MATCH_LWU 0x6003
9019+#define MASK_LWU 0x707f
9020+#define MATCH_CUSTOM0_RS1 0x200b
9021+#define MASK_CUSTOM0_RS1 0x707f
9022+#define MATCH_VLSEGSTBU 0x800305b
9023+#define MASK_VLSEGSTBU 0x1e00707f
9024+#define MATCH_FNMADD_D 0x200004f
9025+#define MASK_FNMADD_D 0x600007f
9026+#define MATCH_FCVT_W_S 0xc0000053
9027+#define MASK_FCVT_W_S 0xfff0007f
9028+#define MATCH_C_SRAI 0x1019
9029+#define MASK_C_SRAI 0x1c1f
9030+#define MATCH_MULHSU 0x2002033
9031+#define MASK_MULHSU 0xfe00707f
9032+#define MATCH_FCVT_D_LU 0xd2300053
9033+#define MASK_FCVT_D_LU 0xfff0007f
9034+#define MATCH_FCVT_W_D 0xc2000053
9035+#define MASK_FCVT_W_D 0xfff0007f
9036+#define MATCH_FSUB_H 0xc000053
9037+#define MASK_FSUB_H 0xfe00007f
9038+#define MATCH_DIVUW 0x200503b
9039+#define MASK_DIVUW 0xfe00707f
9040+#define MATCH_SLTI 0x2013
9041+#define MASK_SLTI 0x707f
9042+#define MATCH_VLSTBU 0x800305b
9043+#define MASK_VLSTBU 0xfe00707f
9044+#define MATCH_SLTU 0x3033
9045+#define MASK_SLTU 0xfe00707f
9046+#define MATCH_FLH 0x1007
9047+#define MASK_FLH 0x707f
9048+#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
9049+#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
9050+#define MATCH_FLD 0x3007
9051+#define MASK_FLD 0x707f
9052+#define MATCH_FSUB_S 0x8000053
9053+#define MASK_FSUB_S 0xfe00007f
9054+#define MATCH_FCVT_H_LU 0x6c000053
9055+#define MASK_FCVT_H_LU 0xfff0007f
9056+#define MATCH_CUSTOM0 0xb
9057+#define MASK_CUSTOM0 0x707f
9058+#define MATCH_CUSTOM1 0x2b
9059+#define MASK_CUSTOM1 0x707f
9060+#define MATCH_CUSTOM2 0x5b
9061+#define MASK_CUSTOM2 0x707f
9062+#define MATCH_CUSTOM3 0x7b
9063+#define MASK_CUSTOM3 0x707f
9064+#define MATCH_VXCPTSAVE 0x302b
9065+#define MASK_VXCPTSAVE 0xfff07fff
9066+#define MATCH_VMSV 0x200202b
9067+#define MASK_VMSV 0xfff0707f
9068+#define MATCH_FCVT_LU_S 0xc0300053
9069+#define MASK_FCVT_LU_S 0xfff0007f
9070+#define MATCH_AUIPC 0x17
9071+#define MASK_AUIPC 0x7f
9072+#define MATCH_FRFLAGS 0x102073
9073+#define MASK_FRFLAGS 0xfffff07f
9074+#define MATCH_FCVT_LU_D 0xc2300053
9075+#define MASK_FCVT_LU_D 0xfff0007f
9076+#define MATCH_CSRRWI 0x5073
9077+#define MASK_CSRRWI 0x707f
9078+#define MATCH_FADD_H 0x4000053
9079+#define MASK_FADD_H 0xfe00007f
9080+#define MATCH_FSQRT_S 0x58000053
9081+#define MASK_FSQRT_S 0xfff0007f
9082+#define MATCH_VXCPTKILL 0x400302b
9083+#define MASK_VXCPTKILL 0xffffffff
9084+#define MATCH_STOP 0x5077
9085+#define MASK_STOP 0xffffffff
9086+#define MATCH_FSGNJN_S 0x20001053
9087+#define MASK_FSGNJN_S 0xfe00707f
9088+#define MATCH_FSGNJN_H 0x34000053
9089+#define MASK_FSGNJN_H 0xfe00707f
9090+#define MATCH_FSQRT_D 0x5a000053
9091+#define MASK_FSQRT_D 0xfff0007f
9092+#define MATCH_XORI 0x4013
9093+#define MASK_XORI 0x707f
9094+#define MATCH_DIVU 0x2005033
9095+#define MASK_DIVU 0xfe00707f
9096+#define MATCH_FSGNJN_D 0x22001053
9097+#define MASK_FSGNJN_D 0xfe00707f
9098+#define MATCH_FSQRT_H 0x24000053
9099+#define MASK_FSQRT_H 0xfff0007f
9100+#define MATCH_VSSEGSTH 0x200307b
9101+#define MASK_VSSEGSTH 0x1e00707f
9102+#define MATCH_SW 0x2023
9103+#define MASK_SW 0x707f
9104+#define MATCH_VLSTWU 0xc00305b
9105+#define MASK_VLSTWU 0xfe00707f
9106+#define MATCH_VFSSEGW 0x1400207b
9107+#define MASK_VFSSEGW 0x1ff0707f
9108+#define MATCH_LHU 0x5003
9109+#define MASK_LHU 0x707f
9110+#define MATCH_SH 0x1023
9111+#define MASK_SH 0x707f
9112+#define MATCH_FMSUB_H 0x4000047
9113+#define MASK_FMSUB_H 0x600007f
9114+#define MATCH_VXCPTAUX 0x200402b
9115+#define MASK_VXCPTAUX 0xfffff07f
9116+#define MATCH_FMSUB_D 0x2000047
9117+#define MASK_FMSUB_D 0x600007f
9118+#define MATCH_VFSSEGD 0x1600207b
9119+#define MASK_VFSSEGD 0x1ff0707f
9120+#define MATCH_VLSEGHU 0xa00205b
9121+#define MASK_VLSEGHU 0x1ff0707f
9122+#define MATCH_MOVN 0x2007077
9123+#define MASK_MOVN 0xfe00707f
9124+#define MATCH_CUSTOM1_RS1 0x202b
9125+#define MASK_CUSTOM1_RS1 0x707f
9126+#define MATCH_VLSTHU 0xa00305b
9127+#define MASK_VLSTHU 0xfe00707f
9128+#define MATCH_MOVZ 0x7077
9129+#define MASK_MOVZ 0xfe00707f
9130+#define MATCH_CSRRW 0x1073
9131+#define MASK_CSRRW 0x707f
9132+#define MATCH_LD 0x3003
9133+#define MASK_LD 0x707f
9134+#define MATCH_LB 0x3
9135+#define MASK_LB 0x707f
9136+#define MATCH_VLWU 0xc00205b
9137+#define MASK_VLWU 0xfff0707f
9138+#define MATCH_LH 0x1003
9139+#define MASK_LH 0x707f
9140+#define MATCH_LW 0x2003
9141+#define MASK_LW 0x707f
9142+#define MATCH_CSRRC 0x3073
9143+#define MASK_CSRRC 0x707f
9144+#define MATCH_FCVT_LU_H 0x4c000053
9145+#define MASK_FCVT_LU_H 0xfff0007f
9146+#define MATCH_FCVT_S_D 0x40100053
9147+#define MASK_FCVT_S_D 0xfff0007f
9148+#define MATCH_BGEU 0x7063
9149+#define MASK_BGEU 0x707f
9150+#define MATCH_VFLSTD 0x1600305b
9151+#define MASK_VFLSTD 0xfe00707f
9152+#define MATCH_FCVT_S_L 0xd0200053
9153+#define MASK_FCVT_S_L 0xfff0007f
9154+#define MATCH_FCVT_S_H 0x84000053
9155+#define MASK_FCVT_S_H 0xfff0007f
9156+#define MATCH_FSCSR 0x301073
9157+#define MASK_FSCSR 0xfff0707f
9158+#define MATCH_FCVT_S_W 0xd0000053
9159+#define MASK_FCVT_S_W 0xfff0007f
9160+#define MATCH_VFLSTW 0x1400305b
9161+#define MASK_VFLSTW 0xfe00707f
9162+#define MATCH_VXCPTEVAC 0x600302b
9163+#define MASK_VXCPTEVAC 0xfff07fff
9164+#define MATCH_AMOMINU_D 0xc000302f
9165+#define MASK_AMOMINU_D 0xf800707f
9166+#define MATCH_FSFLAGS 0x101073
9167+#define MASK_FSFLAGS 0xfff0707f
9168+#define MATCH_SRLI 0x5013
9169+#define MASK_SRLI 0xfc00707f
9170+#define MATCH_C_SRLI 0x819
9171+#define MASK_C_SRLI 0x1c1f
9172+#define MATCH_AMOMINU_W 0xc000202f
9173+#define MASK_AMOMINU_W 0xf800707f
9174+#define MATCH_SRLW 0x503b
9175+#define MASK_SRLW 0xfe00707f
9176+#define MATCH_VFLSEGW 0x1400205b
9177+#define MASK_VFLSEGW 0x1ff0707f
9178+#define MATCH_C_LD0 0x8012
9179+#define MASK_C_LD0 0x801f
9180+#define MATCH_VLSEGBU 0x800205b
9181+#define MASK_VLSEGBU 0x1ff0707f
9182+#define MATCH_JALR 0x67
9183+#define MASK_JALR 0x707f
9184+#define MATCH_BLT 0x4063
9185+#define MASK_BLT 0x707f
9186+#define MATCH_CUSTOM2_RD_RS1 0x605b
9187+#define MASK_CUSTOM2_RD_RS1 0x707f
9188+#define MATCH_FCLASS_S 0xe0001053
9189+#define MASK_FCLASS_S 0xfff0707f
9190+#define MATCH_REM 0x2006033
9191+#define MASK_REM 0xfe00707f
9192+#define MATCH_FCLASS_D 0xe2001053
9193+#define MASK_FCLASS_D 0xfff0707f
9194+#define MATCH_FMUL_S 0x10000053
9195+#define MASK_FMUL_S 0xfe00007f
9196+#define MATCH_RDCYCLEH 0xc8002073
9197+#define MASK_RDCYCLEH 0xfffff07f
9198+#define MATCH_VLSEGSTHU 0xa00305b
9199+#define MASK_VLSEGSTHU 0x1e00707f
9200+#define MATCH_FMUL_D 0x12000053
9201+#define MASK_FMUL_D 0xfe00007f
9202+#define MATCH_ORI 0x6013
9203+#define MASK_ORI 0x707f
9204+#define MATCH_FMUL_H 0x14000053
9205+#define MASK_FMUL_H 0xfe00007f
9206+#define MATCH_VFLSEGD 0x1600205b
9207+#define MASK_VFLSEGD 0x1ff0707f
9208+#define MATCH_FEQ_S 0xa0002053
9209+#define MASK_FEQ_S 0xfe00707f
9210+#define MATCH_FSGNJX_D 0x22002053
9211+#define MASK_FSGNJX_D 0xfe00707f
9212+#define MATCH_SRAIW 0x4000501b
9213+#define MASK_SRAIW 0xfe00707f
9214+#define MATCH_FSGNJX_H 0x3c000053
9215+#define MASK_FSGNJX_H 0xfe00707f
9216+#define MATCH_FSGNJX_S 0x20002053
9217+#define MASK_FSGNJX_S 0xfe00707f
9218+#define MATCH_FEQ_D 0xa2002053
9219+#define MASK_FEQ_D 0xfe00707f
9220+#define MATCH_CUSTOM1_RD_RS1 0x602b
9221+#define MASK_CUSTOM1_RD_RS1 0x707f
9222+#define MATCH_FEQ_H 0xac000053
9223+#define MASK_FEQ_H 0xfe00707f
9224+#define MATCH_AMOMAXU_D 0xe000302f
9225+#define MASK_AMOMAXU_D 0xf800707f
9226+#define MATCH_DIVW 0x200403b
9227+#define MASK_DIVW 0xfe00707f
9228+#define MATCH_AMOMAXU_W 0xe000202f
9229+#define MASK_AMOMAXU_W 0xf800707f
9230+#define MATCH_SRAI_RV32 0x40005013
9231+#define MASK_SRAI_RV32 0xfe00707f
9232+#define MATCH_C_SRLI32 0xc19
9233+#define MASK_C_SRLI32 0x1c1f
9234+#define MATCH_VFSSTW 0x1400307b
9235+#define MASK_VFSSTW 0xfe00707f
9236+#define MATCH_CUSTOM0_RD 0x400b
9237+#define MASK_CUSTOM0_RD 0x707f
9238+#define MATCH_C_BEQ 0x10
9239+#define MASK_C_BEQ 0x1f
9240+#define MATCH_VFSSTD 0x1600307b
9241+#define MASK_VFSSTD 0xfe00707f
9242+#define MATCH_CUSTOM3_RD_RS1 0x607b
9243+#define MASK_CUSTOM3_RD_RS1 0x707f
9244+#define MATCH_LR_D 0x1000302f
9245+#define MASK_LR_D 0xf9f0707f
9246+#define MATCH_LR_W 0x1000202f
9247+#define MASK_LR_W 0xf9f0707f
9248+#define MATCH_FCVT_H_WU 0x7c000053
9249+#define MASK_FCVT_H_WU 0xfff0007f
9250+#define MATCH_VMVV 0x200002b
9251+#define MASK_VMVV 0xfff0707f
9252+#define MATCH_SLLW 0x103b
9253+#define MASK_SLLW 0xfe00707f
9254+#define MATCH_SLLI 0x1013
9255+#define MASK_SLLI 0xfc00707f
9256+#define MATCH_BEQ 0x63
9257+#define MASK_BEQ 0x707f
9258+#define MATCH_AND 0x7033
9259+#define MASK_AND 0xfe00707f
9260+#define MATCH_LBU 0x4003
9261+#define MASK_LBU 0x707f
9262+#define MATCH_FSGNJ_S 0x20000053
9263+#define MASK_FSGNJ_S 0xfe00707f
9264+#define MATCH_FMSUB_S 0x47
9265+#define MASK_FMSUB_S 0x600007f
9266+#define MATCH_C_SUB3 0x11c
9267+#define MASK_C_SUB3 0x31f
9268+#define MATCH_FSGNJ_H 0x2c000053
9269+#define MASK_FSGNJ_H 0xfe00707f
9270+#define MATCH_VLB 0x205b
9271+#define MASK_VLB 0xfff0707f
9272+#define MATCH_C_ADDIW 0x1d
9273+#define MASK_C_ADDIW 0x1f
9274+#define MATCH_CUSTOM3_RS1_RS2 0x307b
9275+#define MASK_CUSTOM3_RS1_RS2 0x707f
9276+#define MATCH_FSGNJ_D 0x22000053
9277+#define MASK_FSGNJ_D 0xfe00707f
9278+#define MATCH_VLSEGWU 0xc00205b
9279+#define MASK_VLSEGWU 0x1ff0707f
9280+#define MATCH_FCVT_S_WU 0xd0100053
9281+#define MASK_FCVT_S_WU 0xfff0007f
9282+#define MATCH_CUSTOM3_RS1 0x207b
9283+#define MASK_CUSTOM3_RS1 0x707f
9284+#define MATCH_SC_D 0x1800302f
9285+#define MASK_SC_D 0xf800707f
9286+#define MATCH_VFSW 0x1400207b
9287+#define MASK_VFSW 0xfff0707f
9288+#define MATCH_AMOSWAP_D 0x800302f
9289+#define MASK_AMOSWAP_D 0xf800707f
9290+#define MATCH_SB 0x23
9291+#define MASK_SB 0x707f
9292+#define MATCH_AMOSWAP_W 0x800202f
9293+#define MASK_AMOSWAP_W 0xf800707f
9294+#define MATCH_VFSD 0x1600207b
9295+#define MASK_VFSD 0xfff0707f
9296+#define MATCH_CUSTOM2_RS1 0x205b
9297+#define MASK_CUSTOM2_RS1 0x707f
9298+#define MATCH_SD 0x3023
9299+#define MASK_SD 0x707f
9300+#define MATCH_FMV_S_X 0xf0000053
9301+#define MASK_FMV_S_X 0xfff0707f
9302+#define MATCH_REMUW 0x200703b
9303+#define MASK_REMUW 0xfe00707f
9304+#define MATCH_JAL 0x6f
9305+#define MASK_JAL 0x7f
9306+#define MATCH_C_FSD 0x18
9307+#define MASK_C_FSD 0x1f
9308+#define MATCH_RDCYCLE 0xc0002073
9309+#define MASK_RDCYCLE 0xfffff07f
9310+#define MATCH_C_BNE 0x11
9311+#define MASK_C_BNE 0x1f
9312+#define MATCH_C_ADD 0x1a
9313+#define MASK_C_ADD 0x801f
9314+#define MATCH_VXCPTCAUSE 0x402b
9315+#define MASK_VXCPTCAUSE 0xfffff07f
9316+#define MATCH_VGETCFG 0x400b
9317+#define MASK_VGETCFG 0xfffff07f
9318+#define MATCH_LUI 0x37
9319+#define MASK_LUI 0x7f
9320+#define MATCH_VSETCFG 0x200b
9321+#define MASK_VSETCFG 0x7fff
9322+#define MATCH_C_SDSP 0x6
9323+#define MASK_C_SDSP 0x1f
9324+#define MATCH_C_LDSP 0x4
9325+#define MASK_C_LDSP 0x1f
9326+#define MATCH_FNMADD_H 0x400004f
9327+#define MASK_FNMADD_H 0x600007f
9328+#define MATCH_CUSTOM0_RS1_RS2 0x300b
9329+#define MASK_CUSTOM0_RS1_RS2 0x707f
9330+#define MATCH_SLLI_RV32 0x1013
9331+#define MASK_SLLI_RV32 0xfe00707f
9332+#define MATCH_MUL 0x2000033
9333+#define MASK_MUL 0xfe00707f
9334+#define MATCH_CSRRCI 0x7073
9335+#define MASK_CSRRCI 0x707f
9336+#define MATCH_C_SRAI32 0x1419
9337+#define MASK_C_SRAI32 0x1c1f
9338+#define MATCH_FLT_H 0xb4000053
9339+#define MASK_FLT_H 0xfe00707f
9340+#define MATCH_SRAI 0x40005013
9341+#define MASK_SRAI 0xfc00707f
9342+#define MATCH_AMOAND_D 0x6000302f
9343+#define MASK_AMOAND_D 0xf800707f
9344+#define MATCH_FLT_D 0xa2001053
9345+#define MASK_FLT_D 0xfe00707f
9346+#define MATCH_SRAW 0x4000503b
9347+#define MASK_SRAW 0xfe00707f
9348+#define MATCH_CSRRS 0x2073
9349+#define MASK_CSRRS 0x707f
9350+#define MATCH_FLT_S 0xa0001053
9351+#define MASK_FLT_S 0xfe00707f
9352+#define MATCH_ADDIW 0x1b
9353+#define MASK_ADDIW 0x707f
9354+#define MATCH_AMOAND_W 0x6000202f
9355+#define MASK_AMOAND_W 0xf800707f
9356+#define MATCH_CUSTOM2_RD 0x405b
9357+#define MASK_CUSTOM2_RD 0x707f
9358+#define MATCH_FCVT_WU_D 0xc2100053
9359+#define MASK_FCVT_WU_D 0xfff0007f
9360+#define MATCH_AMOXOR_W 0x2000202f
9361+#define MASK_AMOXOR_W 0xf800707f
9362+#define MATCH_FCVT_D_L 0xd2200053
9363+#define MASK_FCVT_D_L 0xfff0007f
9364+#define MATCH_FCVT_WU_H 0x5c000053
9365+#define MASK_FCVT_WU_H 0xfff0007f
9366+#define MATCH_C_SLLI 0x19
9367+#define MASK_C_SLLI 0x1c1f
9368+#define MATCH_AMOXOR_D 0x2000302f
9369+#define MASK_AMOXOR_D 0xf800707f
9370+#define MATCH_FCVT_WU_S 0xc0100053
9371+#define MASK_FCVT_WU_S 0xfff0007f
9372+#define MATCH_CUSTOM3_RD 0x407b
9373+#define MASK_CUSTOM3_RD 0x707f
9374+#define MATCH_FMAX_H 0xcc000053
9375+#define MASK_FMAX_H 0xfe00707f
9376+#define MATCH_VENQCNT 0x1000302b
9377+#define MASK_VENQCNT 0xfe007fff
9378+#define MATCH_VLBU 0x800205b
9379+#define MASK_VLBU 0xfff0707f
9380+#define MATCH_VLHU 0xa00205b
9381+#define MASK_VLHU 0xfff0707f
9382+#define MATCH_C_SW 0xd
9383+#define MASK_C_SW 0x1f
9384+#define MATCH_C_SD 0xc
9385+#define MASK_C_SD 0x1f
9386+#define MATCH_C_OR3 0x21c
9387+#define MASK_C_OR3 0x31f
9388+#define MATCH_C_AND3 0x31c
9389+#define MASK_C_AND3 0x31f
9390+#define MATCH_VFSSEGSTW 0x1400307b
9391+#define MASK_VFSSEGSTW 0x1e00707f
9392+#define MATCH_SLT 0x2033
9393+#define MASK_SLT 0xfe00707f
9394+#define MATCH_AMOOR_D 0x4000302f
9395+#define MASK_AMOOR_D 0xf800707f
9396+#define MATCH_REMU 0x2007033
9397+#define MASK_REMU 0xfe00707f
9398+#define MATCH_REMW 0x200603b
9399+#define MASK_REMW 0xfe00707f
9400+#define MATCH_SLL 0x1033
9401+#define MASK_SLL 0xfe00707f
9402+#define MATCH_VFSSEGSTD 0x1600307b
9403+#define MASK_VFSSEGSTD 0x1e00707f
9404+#define MATCH_AMOOR_W 0x4000202f
9405+#define MASK_AMOOR_W 0xf800707f
9406+#define MATCH_CUSTOM2_RS1_RS2 0x305b
9407+#define MASK_CUSTOM2_RS1_RS2 0x707f
9408+#define MATCH_VF 0x10202b
9409+#define MASK_VF 0x1f0707f
9410+#define MATCH_VFMVV 0x1000002b
9411+#define MASK_VFMVV 0xfff0707f
9412+#define MATCH_VFLSEGSTW 0x1400305b
9413+#define MASK_VFLSEGSTW 0x1e00707f
9414+#define MATCH_VXCPTRESTORE 0x200302b
9415+#define MASK_VXCPTRESTORE 0xfff07fff
9416+#define MATCH_VXCPTHOLD 0x800302b
9417+#define MASK_VXCPTHOLD 0xffffffff
9418+#define MATCH_SLTIU 0x3013
9419+#define MASK_SLTIU 0x707f
9420+#define MATCH_VFLSEGSTD 0x1600305b
9421+#define MASK_VFLSEGSTD 0x1e00707f
9422+#define MATCH_VFLD 0x1600205b
9423+#define MASK_VFLD 0xfff0707f
9424+#define MATCH_FMADD_S 0x43
9425+#define MASK_FMADD_S 0x600007f
9426+#define MATCH_VFLW 0x1400205b
9427+#define MASK_VFLW 0xfff0707f
9428+#define MATCH_FMADD_D 0x2000043
9429+#define MASK_FMADD_D 0x600007f
9430+#define MATCH_FMADD_H 0x4000043
9431+#define MASK_FMADD_H 0x600007f
9432+#define MATCH_SRET 0x80000073
9433+#define MASK_SRET 0xffffffff
9434+#define MATCH_VSSEGW 0x400207b
9435+#define MASK_VSSEGW 0x1ff0707f
9436+#define MATCH_CUSTOM0_RD_RS1 0x600b
9437+#define MASK_CUSTOM0_RD_RS1 0x707f
9438+#define MATCH_VSSEGH 0x200207b
9439+#define MASK_VSSEGH 0x1ff0707f
9440+#define MATCH_FRCSR 0x302073
9441+#define MASK_FRCSR 0xfffff07f
9442+#define MATCH_VSSEGD 0x600207b
9443+#define MASK_VSSEGD 0x1ff0707f
9444+#define MATCH_VSSEGB 0x207b
9445+#define MASK_VSSEGB 0x1ff0707f
9446+#define MATCH_FMIN_H 0xc4000053
9447+#define MASK_FMIN_H 0xfe00707f
9448+#define MATCH_FMIN_D 0x2a000053
9449+#define MASK_FMIN_D 0xfe00707f
9450+#define MATCH_BLTU 0x6063
9451+#define MASK_BLTU 0x707f
9452+#define MATCH_FMIN_S 0x28000053
9453+#define MASK_FMIN_S 0xfe00707f
9454+#define MATCH_SRLI_RV32 0x5013
9455+#define MASK_SRLI_RV32 0xfe00707f
9456+#define MATCH_SLLIW 0x101b
9457+#define MASK_SLLIW 0xfe00707f
9458+#define MATCH_FMAX_S 0x28001053
9459+#define MASK_FMAX_S 0xfe00707f
9460+#define MATCH_FCVT_D_H 0x8c000053
9461+#define MASK_FCVT_D_H 0xfff0007f
9462+#define MATCH_FCVT_D_W 0xd2000053
9463+#define MASK_FCVT_D_W 0xfff0007f
9464+#define MATCH_ADD 0x33
9465+#define MASK_ADD 0xfe00707f
9466+#define MATCH_FCVT_D_S 0x42000053
9467+#define MASK_FCVT_D_S 0xfff0007f
9468+#define MATCH_FMAX_D 0x2a001053
9469+#define MASK_FMAX_D 0xfe00707f
9470+#define MATCH_BNE 0x1063
9471+#define MASK_BNE 0x707f
9472+#define MATCH_CUSTOM1_RD 0x402b
9473+#define MASK_CUSTOM1_RD 0x707f
9474+#define MATCH_FSRM 0x201073
9475+#define MASK_FSRM 0xfff0707f
9476+#define MATCH_FDIV_D 0x1a000053
9477+#define MASK_FDIV_D 0xfe00007f
9478+#define MATCH_VSW 0x400207b
9479+#define MASK_VSW 0xfff0707f
9480+#define MATCH_FCVT_L_S 0xc0200053
9481+#define MASK_FCVT_L_S 0xfff0007f
9482+#define MATCH_FDIV_H 0x1c000053
9483+#define MASK_FDIV_H 0xfe00007f
9484+#define MATCH_VSB 0x207b
9485+#define MASK_VSB 0xfff0707f
9486+#define MATCH_FDIV_S 0x18000053
9487+#define MASK_FDIV_S 0xfe00007f
9488+#define MATCH_FSRMI 0x205073
9489+#define MASK_FSRMI 0xfff0707f
9490+#define MATCH_FCVT_L_H 0x44000053
9491+#define MASK_FCVT_L_H 0xfff0007f
9492+#define MATCH_VSH 0x200207b
9493+#define MASK_VSH 0xfff0707f
9494+#define MATCH_FCVT_L_D 0xc2200053
9495+#define MASK_FCVT_L_D 0xfff0007f
9496+#define MATCH_FCVT_H_S 0x90000053
9497+#define MASK_FCVT_H_S 0xfff0007f
9498+#define MATCH_SCALL 0x73
9499+#define MASK_SCALL 0xffffffff
9500+#define MATCH_FSFLAGSI 0x105073
9501+#define MASK_FSFLAGSI 0xfff0707f
9502+#define MATCH_FCVT_H_W 0x74000053
9503+#define MASK_FCVT_H_W 0xfff0007f
9504+#define MATCH_FCVT_H_L 0x64000053
9505+#define MASK_FCVT_H_L 0xfff0007f
9506+#define MATCH_SRLIW 0x501b
9507+#define MASK_SRLIW 0xfe00707f
9508+#define MATCH_FCVT_S_LU 0xd0300053
9509+#define MASK_FCVT_S_LU 0xfff0007f
9510+#define MATCH_FCVT_H_D 0x92000053
9511+#define MASK_FCVT_H_D 0xfff0007f
9512+#define MATCH_SBREAK 0x100073
9513+#define MASK_SBREAK 0xffffffff
9514+#define MATCH_RDINSTRETH 0xc8202073
9515+#define MASK_RDINSTRETH 0xfffff07f
9516+#define MATCH_SRA 0x40005033
9517+#define MASK_SRA 0xfe00707f
9518+#define MATCH_BGE 0x5063
9519+#define MASK_BGE 0x707f
9520+#define MATCH_SRL 0x5033
9521+#define MASK_SRL 0xfe00707f
9522+#define MATCH_VENQCMD 0xa00302b
9523+#define MASK_VENQCMD 0xfe007fff
9524+#define MATCH_OR 0x6033
9525+#define MASK_OR 0xfe00707f
9526+#define MATCH_SUBW 0x4000003b
9527+#define MASK_SUBW 0xfe00707f
9528+#define MATCH_FMV_X_D 0xe2000053
9529+#define MASK_FMV_X_D 0xfff0707f
9530+#define MATCH_RDTIME 0xc0102073
9531+#define MASK_RDTIME 0xfffff07f
9532+#define MATCH_AMOADD_D 0x302f
9533+#define MASK_AMOADD_D 0xf800707f
9534+#define MATCH_AMOMAX_W 0xa000202f
9535+#define MASK_AMOMAX_W 0xf800707f
9536+#define MATCH_C_MOVE 0x2
9537+#define MASK_C_MOVE 0x801f
9538+#define MATCH_FMOVN 0x6007077
9539+#define MASK_FMOVN 0xfe00707f
9540+#define MATCH_C_FSW 0x16
9541+#define MASK_C_FSW 0x1f
9542+#define MATCH_AMOADD_W 0x202f
9543+#define MASK_AMOADD_W 0xf800707f
9544+#define MATCH_AMOMAX_D 0xa000302f
9545+#define MASK_AMOMAX_D 0xf800707f
9546+#define MATCH_FMOVZ 0x4007077
9547+#define MASK_FMOVZ 0xfe00707f
9548+#define MATCH_CUSTOM1_RS1_RS2 0x302b
9549+#define MASK_CUSTOM1_RS1_RS2 0x707f
9550+#define MATCH_FMV_X_H 0xe4000053
9551+#define MASK_FMV_X_H 0xfff0707f
9552+#define MATCH_VSD 0x600207b
9553+#define MASK_VSD 0xfff0707f
9554+#define MATCH_VLSEGSTW 0x400305b
9555+#define MASK_VLSEGSTW 0x1e00707f
9556+#define MATCH_C_ADDI 0x1
9557+#define MASK_C_ADDI 0x1f
9558+#define MATCH_C_SLLIW 0x1819
9559+#define MASK_C_SLLIW 0x1c1f
9560+#define MATCH_VLSEGSTB 0x305b
9561+#define MASK_VLSEGSTB 0x1e00707f
9562+#define MATCH_VLSEGSTD 0x600305b
9563+#define MASK_VLSEGSTD 0x1e00707f
9564+#define MATCH_VLSEGSTH 0x200305b
9565+#define MASK_VLSEGSTH 0x1e00707f
9566+#define MATCH_MULHU 0x2003033
9567+#define MASK_MULHU 0xfe00707f
9568+#define MATCH_AMOMIN_W 0x8000202f
9569+#define MASK_AMOMIN_W 0xf800707f
9570+#define MATCH_C_SLLI32 0x419
9571+#define MASK_C_SLLI32 0x1c1f
9572+#define MATCH_C_ADD3 0x1c
9573+#define MASK_C_ADD3 0x31f
9574+#define MATCH_VGETVL 0x200400b
9575+#define MASK_VGETVL 0xfffff07f
9576+#define MATCH_AMOMIN_D 0x8000302f
9577+#define MASK_AMOMIN_D 0xf800707f
9578+#define MATCH_FCVT_W_H 0x54000053
9579+#define MASK_FCVT_W_H 0xfff0007f
9580+#define MATCH_VLSEGB 0x205b
9581+#define MASK_VLSEGB 0x1ff0707f
9582+#define MATCH_FSD 0x3027
9583+#define MASK_FSD 0x707f
9584+#define MATCH_VLSEGD 0x600205b
9585+#define MASK_VLSEGD 0x1ff0707f
9586+#define MATCH_FSH 0x1027
9587+#define MASK_FSH 0x707f
9588+#define MATCH_VLSEGH 0x200205b
9589+#define MASK_VLSEGH 0x1ff0707f
9590+#define MATCH_C_SUB 0x801a
9591+#define MASK_C_SUB 0x801f
9592+#define MATCH_VLSEGW 0x400205b
9593+#define MASK_VLSEGW 0x1ff0707f
9594+#define MATCH_FSW 0x2027
9595+#define MASK_FSW 0x707f
9596+#define MATCH_C_J 0x8002
9597+#define MASK_C_J 0x801f
9598+#define CSR_FFLAGS 0x1
9599+#define CSR_FRM 0x2
9600+#define CSR_FCSR 0x3
9601+#define CSR_STATS 0xc0
9602+#define CSR_SUP0 0x500
9603+#define CSR_SUP1 0x501
9604+#define CSR_EPC 0x502
9605+#define CSR_BADVADDR 0x503
9606+#define CSR_PTBR 0x504
9607+#define CSR_ASID 0x505
9608+#define CSR_COUNT 0x506
9609+#define CSR_COMPARE 0x507
9610+#define CSR_EVEC 0x508
9611+#define CSR_CAUSE 0x509
9612+#define CSR_STATUS 0x50a
9613+#define CSR_HARTID 0x50b
9614+#define CSR_IMPL 0x50c
9615+#define CSR_FATC 0x50d
9616+#define CSR_SEND_IPI 0x50e
9617+#define CSR_CLEAR_IPI 0x50f
9618+#define CSR_RESET 0x51d
9619+#define CSR_TOHOST 0x51e
9620+#define CSR_FROMHOST 0x51f
9621+#define CSR_CYCLE 0xc00
9622+#define CSR_TIME 0xc01
9623+#define CSR_INSTRET 0xc02
9624+#define CSR_UARCH0 0xcc0
9625+#define CSR_UARCH1 0xcc1
9626+#define CSR_UARCH2 0xcc2
9627+#define CSR_UARCH3 0xcc3
9628+#define CSR_UARCH4 0xcc4
9629+#define CSR_UARCH5 0xcc5
9630+#define CSR_UARCH6 0xcc6
9631+#define CSR_UARCH7 0xcc7
9632+#define CSR_UARCH8 0xcc8
9633+#define CSR_UARCH9 0xcc9
9634+#define CSR_UARCH10 0xcca
9635+#define CSR_UARCH11 0xccb
9636+#define CSR_UARCH12 0xccc
9637+#define CSR_UARCH13 0xccd
9638+#define CSR_UARCH14 0xcce
9639+#define CSR_UARCH15 0xccf
9640+#define CSR_COUNTH 0x586
9641+#define CSR_CYCLEH 0xc80
9642+#define CSR_TIMEH 0xc81
9643+#define CSR_INSTRETH 0xc82
9644+#define CAUSE_MISALIGNED_FETCH 0x0
9645+#define CAUSE_FAULT_FETCH 0x1
9646+#define CAUSE_ILLEGAL_INSTRUCTION 0x2
9647+#define CAUSE_PRIVILEGED_INSTRUCTION 0x3
9648+#define CAUSE_FP_DISABLED 0x4
9649+#define CAUSE_SYSCALL 0x6
9650+#define CAUSE_BREAKPOINT 0x7
9651+#define CAUSE_MISALIGNED_LOAD 0x8
9652+#define CAUSE_MISALIGNED_STORE 0x9
9653+#define CAUSE_FAULT_LOAD 0xa
9654+#define CAUSE_FAULT_STORE 0xb
9655+#define CAUSE_ACCELERATOR_DISABLED 0xc
9656+#endif
9657+#ifdef DECLARE_INSN
9658+DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
9659+DECLARE_INSN(vlsegstwu, MATCH_VLSEGSTWU, MASK_VLSEGSTWU)
9660+DECLARE_INSN(c_lw0, MATCH_C_LW0, MASK_C_LW0)
9661+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
9662+DECLARE_INSN(vlh, MATCH_VLH, MASK_VLH)
9663+DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
9664+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
9665+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
9666+DECLARE_INSN(vld, MATCH_VLD, MASK_VLD)
9667+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
9668+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
9669+DECLARE_INSN(vlw, MATCH_VLW, MASK_VLW)
9670+DECLARE_INSN(vssegstw, MATCH_VSSEGSTW, MASK_VSSEGSTW)
9671+DECLARE_INSN(utidx, MATCH_UTIDX, MASK_UTIDX)
9672+DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
9673+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
9674+DECLARE_INSN(vssegstd, MATCH_VSSEGSTD, MASK_VSSEGSTD)
9675+DECLARE_INSN(vssegstb, MATCH_VSSEGSTB, MASK_VSSEGSTB)
9676+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
9677+DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
9678+DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
9679+DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
9680+DECLARE_INSN(vfmsv_s, MATCH_VFMSV_S, MASK_VFMSV_S)
9681+DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
9682+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
9683+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
9684+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
9685+DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
9686+DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
9687+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
9688+DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
9689+DECLARE_INSN(vfmsv_d, MATCH_VFMSV_D, MASK_VFMSV_D)
9690+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
9691+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
9692+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
9693+DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
9694+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
9695+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
9696+DECLARE_INSN(vsstw, MATCH_VSSTW, MASK_VSSTW)
9697+DECLARE_INSN(vssth, MATCH_VSSTH, MASK_VSSTH)
9698+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
9699+DECLARE_INSN(vsstb, MATCH_VSSTB, MASK_VSSTB)
9700+DECLARE_INSN(vsstd, MATCH_VSSTD, MASK_VSSTD)
9701+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
9702+DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
9703+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
9704+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
9705+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
9706+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
9707+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
9708+DECLARE_INSN(venqimm1, MATCH_VENQIMM1, MASK_VENQIMM1)
9709+DECLARE_INSN(venqimm2, MATCH_VENQIMM2, MASK_VENQIMM2)
9710+DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
9711+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
9712+DECLARE_INSN(vlstw, MATCH_VLSTW, MASK_VLSTW)
9713+DECLARE_INSN(vlsth, MATCH_VLSTH, MASK_VLSTH)
9714+DECLARE_INSN(vlstb, MATCH_VLSTB, MASK_VLSTB)
9715+DECLARE_INSN(vlstd, MATCH_VLSTD, MASK_VLSTD)
9716+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
9717+DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
9718+DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
9719+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
9720+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
9721+DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
9722+DECLARE_INSN(vlsegstbu, MATCH_VLSEGSTBU, MASK_VLSEGSTBU)
9723+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
9724+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
9725+DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
9726+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
9727+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
9728+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
9729+DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
9730+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
9731+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
9732+DECLARE_INSN(vlstbu, MATCH_VLSTBU, MASK_VLSTBU)
9733+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
9734+DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
9735+DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
9736+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
9737+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
9738+DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
9739+DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
9740+DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
9741+DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
9742+DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
9743+DECLARE_INSN(vxcptsave, MATCH_VXCPTSAVE, MASK_VXCPTSAVE)
9744+DECLARE_INSN(vmsv, MATCH_VMSV, MASK_VMSV)
9745+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
9746+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
9747+DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
9748+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
9749+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
9750+DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
9751+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
9752+DECLARE_INSN(vxcptkill, MATCH_VXCPTKILL, MASK_VXCPTKILL)
9753+DECLARE_INSN(stop, MATCH_STOP, MASK_STOP)
9754+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
9755+DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
9756+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
9757+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
9758+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
9759+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
9760+DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
9761+DECLARE_INSN(vssegsth, MATCH_VSSEGSTH, MASK_VSSEGSTH)
9762+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
9763+DECLARE_INSN(vlstwu, MATCH_VLSTWU, MASK_VLSTWU)
9764+DECLARE_INSN(vfssegw, MATCH_VFSSEGW, MASK_VFSSEGW)
9765+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
9766+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
9767+DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
9768+DECLARE_INSN(vxcptaux, MATCH_VXCPTAUX, MASK_VXCPTAUX)
9769+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
9770+DECLARE_INSN(vfssegd, MATCH_VFSSEGD, MASK_VFSSEGD)
9771+DECLARE_INSN(vlseghu, MATCH_VLSEGHU, MASK_VLSEGHU)
9772+DECLARE_INSN(movn, MATCH_MOVN, MASK_MOVN)
9773+DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
9774+DECLARE_INSN(vlsthu, MATCH_VLSTHU, MASK_VLSTHU)
9775+DECLARE_INSN(movz, MATCH_MOVZ, MASK_MOVZ)
9776+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
9777+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
9778+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
9779+DECLARE_INSN(vlwu, MATCH_VLWU, MASK_VLWU)
9780+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
9781+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
9782+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
9783+DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
9784+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
9785+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
9786+DECLARE_INSN(vflstd, MATCH_VFLSTD, MASK_VFLSTD)
9787+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
9788+DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
9789+DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
9790+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
9791+DECLARE_INSN(vflstw, MATCH_VFLSTW, MASK_VFLSTW)
9792+DECLARE_INSN(vxcptevac, MATCH_VXCPTEVAC, MASK_VXCPTEVAC)
9793+DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
9794+DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
9795+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
9796+DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
9797+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
9798+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
9799+DECLARE_INSN(vflsegw, MATCH_VFLSEGW, MASK_VFLSEGW)
9800+DECLARE_INSN(c_ld0, MATCH_C_LD0, MASK_C_LD0)
9801+DECLARE_INSN(vlsegbu, MATCH_VLSEGBU, MASK_VLSEGBU)
9802+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
9803+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
9804+DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
9805+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
9806+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
9807+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
9808+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
9809+DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
9810+DECLARE_INSN(vlsegsthu, MATCH_VLSEGSTHU, MASK_VLSEGSTHU)
9811+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
9812+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
9813+DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
9814+DECLARE_INSN(vflsegd, MATCH_VFLSEGD, MASK_VFLSEGD)
9815+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
9816+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
9817+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
9818+DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
9819+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
9820+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
9821+DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
9822+DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
9823+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
9824+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
9825+DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
9826+DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
9827+DECLARE_INSN(c_srli32, MATCH_C_SRLI32, MASK_C_SRLI32)
9828+DECLARE_INSN(vfsstw, MATCH_VFSSTW, MASK_VFSSTW)
9829+DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
9830+DECLARE_INSN(c_beq, MATCH_C_BEQ, MASK_C_BEQ)
9831+DECLARE_INSN(vfsstd, MATCH_VFSSTD, MASK_VFSSTD)
9832+DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
9833+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
9834+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
9835+DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
9836+DECLARE_INSN(vmvv, MATCH_VMVV, MASK_VMVV)
9837+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
9838+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
9839+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
9840+DECLARE_INSN(and, MATCH_AND, MASK_AND)
9841+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
9842+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
9843+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
9844+DECLARE_INSN(c_sub3, MATCH_C_SUB3, MASK_C_SUB3)
9845+DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
9846+DECLARE_INSN(vlb, MATCH_VLB, MASK_VLB)
9847+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
9848+DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
9849+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
9850+DECLARE_INSN(vlsegwu, MATCH_VLSEGWU, MASK_VLSEGWU)
9851+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
9852+DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
9853+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
9854+DECLARE_INSN(vfsw, MATCH_VFSW, MASK_VFSW)
9855+DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
9856+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
9857+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
9858+DECLARE_INSN(vfsd, MATCH_VFSD, MASK_VFSD)
9859+DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
9860+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
9861+DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
9862+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
9863+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
9864+DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
9865+DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
9866+DECLARE_INSN(c_bne, MATCH_C_BNE, MASK_C_BNE)
9867+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
9868+DECLARE_INSN(vxcptcause, MATCH_VXCPTCAUSE, MASK_VXCPTCAUSE)
9869+DECLARE_INSN(vgetcfg, MATCH_VGETCFG, MASK_VGETCFG)
9870+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
9871+DECLARE_INSN(vsetcfg, MATCH_VSETCFG, MASK_VSETCFG)
9872+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
9873+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
9874+DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
9875+DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
9876+DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
9877+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
9878+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
9879+DECLARE_INSN(c_srai32, MATCH_C_SRAI32, MASK_C_SRAI32)
9880+DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
9881+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
9882+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
9883+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
9884+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
9885+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
9886+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
9887+DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
9888+DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
9889+DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
9890+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
9891+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
9892+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
9893+DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
9894+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
9895+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
9896+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
9897+DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
9898+DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
9899+DECLARE_INSN(venqcnt, MATCH_VENQCNT, MASK_VENQCNT)
9900+DECLARE_INSN(vlbu, MATCH_VLBU, MASK_VLBU)
9901+DECLARE_INSN(vlhu, MATCH_VLHU, MASK_VLHU)
9902+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
9903+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
9904+DECLARE_INSN(c_or3, MATCH_C_OR3, MASK_C_OR3)
9905+DECLARE_INSN(c_and3, MATCH_C_AND3, MASK_C_AND3)
9906+DECLARE_INSN(vfssegstw, MATCH_VFSSEGSTW, MASK_VFSSEGSTW)
9907+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
9908+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
9909+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
9910+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
9911+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
9912+DECLARE_INSN(vfssegstd, MATCH_VFSSEGSTD, MASK_VFSSEGSTD)
9913+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
9914+DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
9915+DECLARE_INSN(vf, MATCH_VF, MASK_VF)
9916+DECLARE_INSN(vfmvv, MATCH_VFMVV, MASK_VFMVV)
9917+DECLARE_INSN(vflsegstw, MATCH_VFLSEGSTW, MASK_VFLSEGSTW)
9918+DECLARE_INSN(vxcptrestore, MATCH_VXCPTRESTORE, MASK_VXCPTRESTORE)
9919+DECLARE_INSN(vxcpthold, MATCH_VXCPTHOLD, MASK_VXCPTHOLD)
9920+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
9921+DECLARE_INSN(vflsegstd, MATCH_VFLSEGSTD, MASK_VFLSEGSTD)
9922+DECLARE_INSN(vfld, MATCH_VFLD, MASK_VFLD)
9923+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
9924+DECLARE_INSN(vflw, MATCH_VFLW, MASK_VFLW)
9925+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
9926+DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
9927+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
9928+DECLARE_INSN(vssegw, MATCH_VSSEGW, MASK_VSSEGW)
9929+DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
9930+DECLARE_INSN(vssegh, MATCH_VSSEGH, MASK_VSSEGH)
9931+DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
9932+DECLARE_INSN(vssegd, MATCH_VSSEGD, MASK_VSSEGD)
9933+DECLARE_INSN(vssegb, MATCH_VSSEGB, MASK_VSSEGB)
9934+DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
9935+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
9936+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
9937+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
9938+DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
9939+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
9940+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
9941+DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
9942+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
9943+DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
9944+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
9945+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
9946+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
9947+DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
9948+DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
9949+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
9950+DECLARE_INSN(vsw, MATCH_VSW, MASK_VSW)
9951+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
9952+DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
9953+DECLARE_INSN(vsb, MATCH_VSB, MASK_VSB)
9954+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
9955+DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
9956+DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
9957+DECLARE_INSN(vsh, MATCH_VSH, MASK_VSH)
9958+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
9959+DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
9960+DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
9961+DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
9962+DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
9963+DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
9964+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
9965+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
9966+DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
9967+DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
9968+DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
9969+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
9970+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
9971+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
9972+DECLARE_INSN(venqcmd, MATCH_VENQCMD, MASK_VENQCMD)
9973+DECLARE_INSN(or, MATCH_OR, MASK_OR)
9974+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
9975+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
9976+DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
9977+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
9978+DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
9979+DECLARE_INSN(c_move, MATCH_C_MOVE, MASK_C_MOVE)
9980+DECLARE_INSN(fmovn, MATCH_FMOVN, MASK_FMOVN)
9981+DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
9982+DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
9983+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
9984+DECLARE_INSN(fmovz, MATCH_FMOVZ, MASK_FMOVZ)
9985+DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
9986+DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
9987+DECLARE_INSN(vsd, MATCH_VSD, MASK_VSD)
9988+DECLARE_INSN(vlsegstw, MATCH_VLSEGSTW, MASK_VLSEGSTW)
9989+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
9990+DECLARE_INSN(c_slliw, MATCH_C_SLLIW, MASK_C_SLLIW)
9991+DECLARE_INSN(vlsegstb, MATCH_VLSEGSTB, MASK_VLSEGSTB)
9992+DECLARE_INSN(vlsegstd, MATCH_VLSEGSTD, MASK_VLSEGSTD)
9993+DECLARE_INSN(vlsegsth, MATCH_VLSEGSTH, MASK_VLSEGSTH)
9994+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
9995+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
9996+DECLARE_INSN(c_slli32, MATCH_C_SLLI32, MASK_C_SLLI32)
9997+DECLARE_INSN(c_add3, MATCH_C_ADD3, MASK_C_ADD3)
9998+DECLARE_INSN(vgetvl, MATCH_VGETVL, MASK_VGETVL)
9999+DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
10000+DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
10001+DECLARE_INSN(vlsegb, MATCH_VLSEGB, MASK_VLSEGB)
10002+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
10003+DECLARE_INSN(vlsegd, MATCH_VLSEGD, MASK_VLSEGD)
10004+DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
10005+DECLARE_INSN(vlsegh, MATCH_VLSEGH, MASK_VLSEGH)
10006+DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
10007+DECLARE_INSN(vlsegw, MATCH_VLSEGW, MASK_VLSEGW)
10008+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
10009+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
10010+#endif
10011+#ifdef DECLARE_CSR
10012+DECLARE_CSR(fflags, CSR_FFLAGS)
10013+DECLARE_CSR(frm, CSR_FRM)
10014+DECLARE_CSR(fcsr, CSR_FCSR)
10015+DECLARE_CSR(stats, CSR_STATS)
10016+DECLARE_CSR(sup0, CSR_SUP0)
10017+DECLARE_CSR(sup1, CSR_SUP1)
10018+DECLARE_CSR(epc, CSR_EPC)
10019+DECLARE_CSR(badvaddr, CSR_BADVADDR)
10020+DECLARE_CSR(ptbr, CSR_PTBR)
10021+DECLARE_CSR(asid, CSR_ASID)
10022+DECLARE_CSR(count, CSR_COUNT)
10023+DECLARE_CSR(compare, CSR_COMPARE)
10024+DECLARE_CSR(evec, CSR_EVEC)
10025+DECLARE_CSR(cause, CSR_CAUSE)
10026+DECLARE_CSR(status, CSR_STATUS)
10027+DECLARE_CSR(hartid, CSR_HARTID)
10028+DECLARE_CSR(impl, CSR_IMPL)
10029+DECLARE_CSR(fatc, CSR_FATC)
10030+DECLARE_CSR(send_ipi, CSR_SEND_IPI)
10031+DECLARE_CSR(clear_ipi, CSR_CLEAR_IPI)
10032+DECLARE_CSR(reset, CSR_RESET)
10033+DECLARE_CSR(tohost, CSR_TOHOST)
10034+DECLARE_CSR(fromhost, CSR_FROMHOST)
10035+DECLARE_CSR(cycle, CSR_CYCLE)
10036+DECLARE_CSR(time, CSR_TIME)
10037+DECLARE_CSR(instret, CSR_INSTRET)
10038+DECLARE_CSR(uarch0, CSR_UARCH0)
10039+DECLARE_CSR(uarch1, CSR_UARCH1)
10040+DECLARE_CSR(uarch2, CSR_UARCH2)
10041+DECLARE_CSR(uarch3, CSR_UARCH3)
10042+DECLARE_CSR(uarch4, CSR_UARCH4)
10043+DECLARE_CSR(uarch5, CSR_UARCH5)
10044+DECLARE_CSR(uarch6, CSR_UARCH6)
10045+DECLARE_CSR(uarch7, CSR_UARCH7)
10046+DECLARE_CSR(uarch8, CSR_UARCH8)
10047+DECLARE_CSR(uarch9, CSR_UARCH9)
10048+DECLARE_CSR(uarch10, CSR_UARCH10)
10049+DECLARE_CSR(uarch11, CSR_UARCH11)
10050+DECLARE_CSR(uarch12, CSR_UARCH12)
10051+DECLARE_CSR(uarch13, CSR_UARCH13)
10052+DECLARE_CSR(uarch14, CSR_UARCH14)
10053+DECLARE_CSR(uarch15, CSR_UARCH15)
10054+DECLARE_CSR(counth, CSR_COUNTH)
10055+DECLARE_CSR(cycleh, CSR_CYCLEH)
10056+DECLARE_CSR(timeh, CSR_TIMEH)
10057+DECLARE_CSR(instreth, CSR_INSTRETH)
10058+#endif
10059+#ifdef DECLARE_CAUSE
10060+DECLARE_CAUSE("fflags", CAUSE_FFLAGS)
10061+DECLARE_CAUSE("frm", CAUSE_FRM)
10062+DECLARE_CAUSE("fcsr", CAUSE_FCSR)
10063+DECLARE_CAUSE("stats", CAUSE_STATS)
10064+DECLARE_CAUSE("sup0", CAUSE_SUP0)
10065+DECLARE_CAUSE("sup1", CAUSE_SUP1)
10066+DECLARE_CAUSE("epc", CAUSE_EPC)
10067+DECLARE_CAUSE("badvaddr", CAUSE_BADVADDR)
10068+DECLARE_CAUSE("ptbr", CAUSE_PTBR)
10069+DECLARE_CAUSE("asid", CAUSE_ASID)
10070+DECLARE_CAUSE("count", CAUSE_COUNT)
10071+DECLARE_CAUSE("compare", CAUSE_COMPARE)
10072+DECLARE_CAUSE("evec", CAUSE_EVEC)
10073+DECLARE_CAUSE("cause", CAUSE_CAUSE)
10074+DECLARE_CAUSE("status", CAUSE_STATUS)
10075+DECLARE_CAUSE("hartid", CAUSE_HARTID)
10076+DECLARE_CAUSE("impl", CAUSE_IMPL)
10077+DECLARE_CAUSE("fatc", CAUSE_FATC)
10078+DECLARE_CAUSE("send_ipi", CAUSE_SEND_IPI)
10079+DECLARE_CAUSE("clear_ipi", CAUSE_CLEAR_IPI)
10080+DECLARE_CAUSE("reset", CAUSE_RESET)
10081+DECLARE_CAUSE("tohost", CAUSE_TOHOST)
10082+DECLARE_CAUSE("fromhost", CAUSE_FROMHOST)
10083+DECLARE_CAUSE("cycle", CAUSE_CYCLE)
10084+DECLARE_CAUSE("time", CAUSE_TIME)
10085+DECLARE_CAUSE("instret", CAUSE_INSTRET)
10086+DECLARE_CAUSE("uarch0", CAUSE_UARCH0)
10087+DECLARE_CAUSE("uarch1", CAUSE_UARCH1)
10088+DECLARE_CAUSE("uarch2", CAUSE_UARCH2)
10089+DECLARE_CAUSE("uarch3", CAUSE_UARCH3)
10090+DECLARE_CAUSE("uarch4", CAUSE_UARCH4)
10091+DECLARE_CAUSE("uarch5", CAUSE_UARCH5)
10092+DECLARE_CAUSE("uarch6", CAUSE_UARCH6)
10093+DECLARE_CAUSE("uarch7", CAUSE_UARCH7)
10094+DECLARE_CAUSE("uarch8", CAUSE_UARCH8)
10095+DECLARE_CAUSE("uarch9", CAUSE_UARCH9)
10096+DECLARE_CAUSE("uarch10", CAUSE_UARCH10)
10097+DECLARE_CAUSE("uarch11", CAUSE_UARCH11)
10098+DECLARE_CAUSE("uarch12", CAUSE_UARCH12)
10099+DECLARE_CAUSE("uarch13", CAUSE_UARCH13)
10100+DECLARE_CAUSE("uarch14", CAUSE_UARCH14)
10101+DECLARE_CAUSE("uarch15", CAUSE_UARCH15)
10102+DECLARE_CAUSE("counth", CAUSE_COUNTH)
10103+DECLARE_CAUSE("cycleh", CAUSE_CYCLEH)
10104+DECLARE_CAUSE("timeh", CAUSE_TIMEH)
10105+DECLARE_CAUSE("instreth", CAUSE_INSTRETH)
10106+#endif
10107diff -urN original-gcc/gcc/config/riscv/riscv.opt gcc/gcc/config/riscv/riscv.opt
10108--- original-gcc/gcc/config/riscv/riscv.opt 1970-01-01 01:00:00.000000000 +0100
10109+++ gcc-4.9.2/gcc/config/riscv/riscv.opt 2015-03-07 09:51:45.667139025 +0100
10110@@ -0,0 +1,75 @@
10111+; Options for the RISC-V port of the compiler
10112+;
10113+; Copyright (C) 2005, 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
10114+;
10115+; This file is part of GCC.
10116+;
10117+; GCC is free software; you can redistribute it and/or modify it under
10118+; the terms of the GNU General Public License as published by the Free
10119+; Software Foundation; either version 3, or (at your option) any later
10120+; version.
10121+;
10122+; GCC is distributed in the hope that it will be useful, but WITHOUT
10123+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
10124+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
10125+; License for more details.
10126+;
10127+; You should have received a copy of the GNU General Public License
10128+; along with GCC; see the file COPYING3. If not see
10129+; <http://www.gnu.org/licenses/>.
10130+
10131+m32
10132+Target RejectNegative Mask(32BIT)
10133+Generate RV32 code
10134+
10135+m64
10136+Target RejectNegative InverseMask(32BIT, 64BIT)
10137+Generate RV64 code
10138+
10139+mbranch-cost=
10140+Target RejectNegative Joined UInteger Var(riscv_branch_cost)
10141+-mbranch-cost=COST Set the cost of branches to roughly COST instructions
10142+
10143+mhard-float
10144+Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
10145+Allow the use of hardware floating-point ABI and instructions
10146+
10147+mmemcpy
10148+Target Report Mask(MEMCPY)
10149+Don't optimize block moves
10150+
10151+mplt
10152+Target Report Var(TARGET_PLT) Init(1)
10153+When generating -fpic code, allow the use of PLTs. Ignored for fno-pic.
10154+
10155+msoft-float
10156+Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
10157+Prevent the use of all hardware floating-point instructions
10158+
10159+mfdiv
10160+Target Report RejectNegative Mask(FDIV)
10161+Use hardware floating-point divide and square root instructions
10162+
10163+march=
10164+Target RejectNegative Joined Var(riscv_arch_string)
10165+-march= Generate code for given RISC-V ISA (e.g. RV64IM)
10166+
10167+mtune=
10168+Target RejectNegative Joined Var(riscv_tune_string)
10169+-mtune=PROCESSOR Optimize the output for PROCESSOR
10170+
10171+msmall-data-limit=
10172+Target Joined Separate UInteger Var(g_switch_value) Init(8)
10173+-msmall-data-limit=<number> Put global and static data smaller than <number> bytes into a special section (on some targets)
10174+
10175+matomic
10176+Target Report Mask(ATOMIC)
10177+Use hardware atomic memory instructions.
10178+
10179+mmuldiv
10180+Target Report Mask(MULDIV)
10181+Use hardware instructions for integer multiplication and division.
10182+
10183+mlra
10184+Target Report Var(riscv_lra_flag) Init(0) Save
10185+Use LRA instead of reload
10186diff -urN original-gcc/gcc/config/riscv/riscv-protos.h gcc/gcc/config/riscv/riscv-protos.h
10187--- original-gcc/gcc/config/riscv/riscv-protos.h 1970-01-01 01:00:00.000000000 +0100
10188+++ gcc-4.9.2/gcc/config/riscv/riscv-protos.h 2015-03-07 09:51:45.663139025 +0100
10189@@ -0,0 +1,89 @@
10190+/* Definition of RISC-V target for GNU compiler.
10191+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
10192+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
10193+ Based on MIPS target for GNU compiler.
10194+
10195+This file is part of GCC.
10196+
10197+GCC is free software; you can redistribute it and/or modify
10198+it under the terms of the GNU General Public License as published by
10199+the Free Software Foundation; either version 3, or (at your option)
10200+any later version.
10201+
10202+GCC is distributed in the hope that it will be useful,
10203+but WITHOUT ANY WARRANTY; without even the implied warranty of
10204+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10205+GNU General Public License for more details.
10206+
10207+You should have received a copy of the GNU General Public License
10208+along with GCC; see the file COPYING3. If not see
10209+<http://www.gnu.org/licenses/>. */
10210+
10211+#ifndef GCC_RISCV_PROTOS_H
10212+#define GCC_RISCV_PROTOS_H
10213+
10214+enum riscv_symbol_type {
10215+ SYMBOL_ABSOLUTE,
10216+ SYMBOL_GOT_DISP,
10217+ SYMBOL_TLS,
10218+ SYMBOL_TLS_LE,
10219+ SYMBOL_TLS_IE,
10220+ SYMBOL_TLS_GD
10221+};
10222+#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
10223+
10224+extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
10225+extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
10226+extern int riscv_address_insns (rtx, enum machine_mode, bool);
10227+extern int riscv_const_insns (rtx);
10228+extern int riscv_split_const_insns (rtx);
10229+extern int riscv_load_store_insns (rtx, rtx);
10230+extern rtx riscv_emit_move (rtx, rtx);
10231+extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
10232+extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
10233+extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
10234+extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
10235+extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
10236+
10237+extern rtx riscv_subword (rtx, bool);
10238+extern bool riscv_split_64bit_move_p (rtx, rtx);
10239+extern void riscv_split_doubleword_move (rtx, rtx);
10240+extern const char *riscv_output_move (rtx, rtx);
10241+extern const char *riscv_riscv_output_vector_move (enum machine_mode, rtx, rtx);
10242+#ifdef RTX_CODE
10243+extern void riscv_expand_scc (rtx *);
10244+extern void riscv_expand_conditional_branch (rtx *);
10245+#endif
10246+extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
10247+extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
10248+extern void riscv_set_return_address (rtx, rtx);
10249+extern bool riscv_expand_block_move (rtx, rtx, rtx);
10250+extern void riscv_expand_synci_loop (rtx, rtx);
10251+
10252+extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
10253+ HOST_WIDE_INT);
10254+extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
10255+ HOST_WIDE_INT);
10256+extern void riscv_order_regs_for_local_alloc (void);
10257+
10258+extern rtx riscv_return_addr (int, rtx);
10259+extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
10260+extern void riscv_expand_prologue (void);
10261+extern void riscv_expand_epilogue (bool);
10262+extern bool riscv_can_use_return_insn (void);
10263+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
10264+
10265+extern enum reg_class riscv_secondary_reload_class (enum reg_class,
10266+ enum machine_mode,
10267+ rtx, bool);
10268+extern int riscv_class_max_nregs (enum reg_class, enum machine_mode);
10269+
10270+extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
10271+
10272+extern void irix_asm_output_align (FILE *, unsigned);
10273+extern const char *current_section_name (void);
10274+extern unsigned int current_section_flags (void);
10275+
10276+extern void riscv_expand_vector_init (rtx, rtx);
10277+
10278+#endif /* ! GCC_RISCV_PROTOS_H */
10279diff -urN original-gcc/gcc/config/riscv/sync.md gcc/gcc/config/riscv/sync.md
10280--- original-gcc/gcc/config/riscv/sync.md 1970-01-01 01:00:00.000000000 +0100
10281+++ gcc-4.9.2/gcc/config/riscv/sync.md 2015-03-07 09:51:45.667139025 +0100
10282@@ -0,0 +1,198 @@
10283+;; Machine description for RISC-V atomic operations.
10284+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
10285+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
10286+;; Based on MIPS target for GNU compiler.
10287+
10288+;; This file is part of GCC.
10289+
10290+;; GCC is free software; you can redistribute it and/or modify
10291+;; it under the terms of the GNU General Public License as published by
10292+;; the Free Software Foundation; either version 3, or (at your option)
10293+;; any later version.
10294+
10295+;; GCC is distributed in the hope that it will be useful,
10296+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
10297+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10298+;; GNU General Public License for more details.
10299+
10300+;; You should have received a copy of the GNU General Public License
10301+;; along with GCC; see the file COPYING3. If not see
10302+;; <http://www.gnu.org/licenses/>.
10303+
10304+(define_c_enum "unspec" [
10305+ UNSPEC_COMPARE_AND_SWAP
10306+ UNSPEC_SYNC_OLD_OP
10307+ UNSPEC_SYNC_EXCHANGE
10308+ UNSPEC_ATOMIC_STORE
10309+ UNSPEC_MEMORY_BARRIER
10310+])
10311+
10312+(define_code_iterator any_atomic [plus ior xor and])
10313+(define_code_attr atomic_optab
10314+ [(plus "add") (ior "or") (xor "xor") (and "and")])
10315+
10316+;; Memory barriers.
10317+
10318+(define_expand "mem_thread_fence"
10319+ [(match_operand:SI 0 "const_int_operand" "")] ;; model
10320+ ""
10321+{
10322+ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
10323+ {
10324+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
10325+ MEM_VOLATILE_P (mem) = 1;
10326+ emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
10327+ }
10328+ DONE;
10329+})
10330+
10331+(define_insn "mem_thread_fence_1"
10332+ [(set (match_operand:BLK 0 "" "")
10333+ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
10334+ (match_operand:SI 1 "const_int_operand" "")] ;; model
10335+ ""
10336+{
10337+ switch (INTVAL (operands[1]))
10338+ {
10339+ case MEMMODEL_SEQ_CST:
10340+ case MEMMODEL_ACQ_REL:
10341+ return "fence rw,rw";
10342+ case MEMMODEL_ACQUIRE:
10343+ case MEMMODEL_CONSUME:
10344+ return "fence r,rw";
10345+ case MEMMODEL_RELEASE:
10346+ return "fence rw,w";
10347+ default:
10348+ gcc_unreachable();
10349+ }
10350+})
10351+
10352+;; Atomic memory operations.
10353+
10354+;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
10355+(define_insn "atomic_store<mode>"
10356+ [(set (match_operand:GPR 0 "memory_operand" "=A")
10357+ (unspec_volatile:GPR
10358+ [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
10359+ (match_operand:SI 2 "const_int_operand")] ;; model
10360+ UNSPEC_ATOMIC_STORE))]
10361+ "TARGET_ATOMIC"
10362+ "amoswap.<amo>%A2 zero,%z1,%0")
10363+
10364+(define_insn "atomic_<atomic_optab><mode>"
10365+ [(set (match_operand:GPR 0 "memory_operand" "+A")
10366+ (unspec_volatile:GPR
10367+ [(any_atomic:GPR (match_dup 0)
10368+ (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
10369+ (match_operand:SI 2 "const_int_operand")] ;; model
10370+ UNSPEC_SYNC_OLD_OP))]
10371+ "TARGET_ATOMIC"
10372+ "amo<insn>.<amo>%A2 zero,%z1,%0")
10373+
10374+(define_insn "atomic_fetch_<atomic_optab><mode>"
10375+ [(set (match_operand:GPR 0 "register_operand" "=&r")
10376+ (match_operand:GPR 1 "memory_operand" "+A"))
10377+ (set (match_dup 1)
10378+ (unspec_volatile:GPR
10379+ [(any_atomic:GPR (match_dup 1)
10380+ (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
10381+ (match_operand:SI 3 "const_int_operand")] ;; model
10382+ UNSPEC_SYNC_OLD_OP))]
10383+ "TARGET_ATOMIC"
10384+ "amo<insn>.<amo>%A3 %0,%z2,%1")
10385+
10386+(define_insn "atomic_exchange<mode>"
10387+ [(set (match_operand:GPR 0 "register_operand" "=&r")
10388+ (unspec_volatile:GPR
10389+ [(match_operand:GPR 1 "memory_operand" "+A")
10390+ (match_operand:SI 3 "const_int_operand")] ;; model
10391+ UNSPEC_SYNC_EXCHANGE))
10392+ (set (match_dup 1)
10393+ (match_operand:GPR 2 "register_operand" "0"))]
10394+ "TARGET_ATOMIC"
10395+ "amoswap.<amo>%A3 %0,%z2,%1")
10396+
10397+(define_insn "atomic_cas_value_strong<mode>"
10398+ [(set (match_operand:GPR 0 "register_operand" "=&r")
10399+ (match_operand:GPR 1 "memory_operand" "+A"))
10400+ (set (match_dup 1)
10401+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
10402+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")
10403+ (match_operand:SI 4 "const_int_operand") ;; mod_s
10404+ (match_operand:SI 5 "const_int_operand")] ;; mod_f
10405+ UNSPEC_COMPARE_AND_SWAP))
10406+ (clobber (match_scratch:GPR 6 "=&r"))]
10407+ "TARGET_ATOMIC"
10408+ "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
10409+ [(set (attr "length") (const_int 16))])
10410+
10411+(define_expand "atomic_compare_and_swap<mode>"
10412+ [(match_operand:SI 0 "register_operand" "") ;; bool output
10413+ (match_operand:GPR 1 "register_operand" "") ;; val output
10414+ (match_operand:GPR 2 "memory_operand" "") ;; memory
10415+ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value
10416+ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
10417+ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
10418+ (match_operand:SI 6 "const_int_operand" "") ;; mod_s
10419+ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
10420+ "TARGET_ATOMIC"
10421+{
10422+ emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
10423+ operands[3], operands[4],
10424+ operands[6], operands[7]));
10425+
10426+ rtx compare = operands[1];
10427+ if (operands[3] != const0_rtx)
10428+ {
10429+ rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
10430+ compare = gen_reg_rtx (<MODE>mode);
10431+ emit_insn (gen_rtx_SET (VOIDmode, compare, difference));
10432+ }
10433+
10434+ rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
10435+ rtx result = gen_reg_rtx (<MODE>mode);
10436+ emit_insn (gen_rtx_SET (VOIDmode, result, eq));
10437+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_lowpart (SImode, result)));
10438+ DONE;
10439+})
10440+
10441+(define_expand "atomic_test_and_set"
10442+ [(match_operand:QI 0 "register_operand" "") ;; bool output
10443+ (match_operand:QI 1 "memory_operand" "+A") ;; memory
10444+ (match_operand:SI 2 "const_int_operand" "")] ;; model
10445+ "TARGET_ATOMIC"
10446+{
10447+ /* We have no QImode atomics, so use the address LSBs to form a mask,
10448+ then use an aligned SImode atomic. */
10449+ rtx result = operands[0];
10450+ rtx mem = operands[1];
10451+ rtx model = operands[2];
10452+ rtx addr = force_reg (Pmode, XEXP (mem, 0));
10453+
10454+ rtx aligned_addr = gen_reg_rtx (Pmode);
10455+ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
10456+
10457+ rtx aligned_mem = change_address (mem, SImode, aligned_addr);
10458+ set_mem_alias_set (aligned_mem, 0);
10459+
10460+ rtx offset = gen_reg_rtx (SImode);
10461+ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
10462+ GEN_INT (3)));
10463+
10464+ rtx tmp = gen_reg_rtx (SImode);
10465+ emit_move_insn (tmp, GEN_INT (1));
10466+
10467+ rtx shmt = gen_reg_rtx (SImode);
10468+ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
10469+
10470+ rtx word = gen_reg_rtx (SImode);
10471+ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
10472+
10473+ tmp = gen_reg_rtx (SImode);
10474+ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
10475+
10476+ emit_move_insn (gen_lowpart (SImode, result),
10477+ gen_rtx_LSHIFTRT (SImode, tmp,
10478+ gen_lowpart (SImode, shmt)));
10479+ DONE;
10480+})
10481diff -urN original-gcc/gcc/config/riscv/t-elf gcc/gcc/config/riscv/t-elf
10482--- original-gcc/gcc/config/riscv/t-elf 1970-01-01 01:00:00.000000000 +0100
10483+++ gcc-4.9.2/gcc/config/riscv/t-elf 2015-03-07 09:51:45.667139025 +0100
10484@@ -0,0 +1,4 @@
10485+# Build the libraries for both hard and soft floating point
10486+
10487+MULTILIB_OPTIONS = msoft-float m64/m32 mno-atomic
10488+MULTILIB_DIRNAMES = soft-float 64 32 no-atomic
10489diff -urN original-gcc/gcc/config/riscv/t-linux64 gcc/gcc/config/riscv/t-linux64
10490--- original-gcc/gcc/config/riscv/t-linux64 1970-01-01 01:00:00.000000000 +0100
10491+++ gcc-4.9.2/gcc/config/riscv/t-linux64 2015-03-07 09:51:45.667139025 +0100
10492@@ -0,0 +1,5 @@
10493+# Build the libraries for both hard and soft floating point
10494+
10495+MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
10496+MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
10497+MULTILIB_OSDIRNAMES = ../lib ../lib32
10498diff -urN original-gcc/gcc/config.gcc gcc/gcc/config.gcc
10499--- original-gcc/gcc/config.gcc 2014-09-17 16:16:02.000000000 +0200
10500+++ gcc-4.9.2/gcc/config.gcc 2015-03-07 09:57:54.195132741 +0100
10501@@ -447,6 +447,10 @@
10502 esac
10503 extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
10504 ;;
10505+riscv*)
10506+ cpu_type=riscv
10507+ need_64bit_hwint=yes
10508+ ;;
10509 rs6000*-*-*)
10510 need_64bit_hwint=yes
10511 extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
10512@@ -1949,6 +1953,27 @@
10513 cxx_target_objs="${cxx_target_objs} microblaze-c.o"
10514 tmake_file="${tmake_file} microblaze/t-microblaze"
10515 ;;
10516+riscv32*-*-linux*) # Linux RISC-V
10517+ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h riscv/default-32.h ${tm_file} riscv/linux.h riscv/linux64.h"
10518+ tmake_file="${tmake_file} riscv/t-linux64"
10519+ gnu_ld=yes
10520+ gas=yes
10521+ gcc_cv_initfini_array=yes
10522+ ;;
10523+riscv*-*-linux*) # Linux RISC-V
10524+ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h riscv/linux64.h"
10525+ tmake_file="${tmake_file} riscv/t-linux64"
10526+ gnu_ld=yes
10527+ gas=yes
10528+ gcc_cv_initfini_array=yes
10529+ ;;
10530+riscv*-*-elf*) # Bare-metal RISC-V (newlib)
10531+ tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
10532+ tmake_file="${tmake_file} riscv/t-elf"
10533+ gnu_ld=yes
10534+ gas=yes
10535+ gcc_cv_initfini_array=yes
10536+ ;;
10537 mips*-*-netbsd*) # NetBSD/mips, either endian.
10538 target_cpu_default="MASK_ABICALLS"
10539 tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
10540@@ -3756,6 +3781,31 @@
10541 done
10542 ;;
10543
10544+ riscv*-*-*)
10545+ supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64"
10546+
10547+ case ${with_float} in
10548+ "" | soft | hard)
10549+ # OK
10550+ ;;
10551+ *)
10552+ echo "Unknown floating point type used in --with-float=$with_float" 1>&2
10553+ exit 1
10554+ ;;
10555+ esac
10556+
10557+ case ${with_abi} in
10558+ "" | 32 | 64)
10559+ # OK
10560+ ;;
10561+ *)
10562+ echo "Unknown ABI used in --with-abi=$with_abi" 1>&2
10563+ exit 1
10564+ ;;
10565+ esac
10566+
10567+ ;;
10568+
10569 mips*-*-*)
10570 supported_defaults="abi arch arch_32 arch_64 float fpu nan tune tune_32 tune_64 divide llsc mips-plt synci"
10571
10572diff -urN original-gcc/gcc/configure gcc/gcc/configure
10573--- original-gcc/gcc/configure 2014-10-10 14:51:28.000000000 +0200
10574+++ gcc-4.9.2/gcc/configure 2015-03-07 09:57:54.211132741 +0100
10575@@ -23515,6 +23515,25 @@
10576 tls_first_minor=14
10577 tls_as_opt="-a32 --fatal-warnings"
10578 ;;
10579+ riscv*-*-*)
10580+ conftest_s='
10581+ .section .tdata,"awT",@progbits
10582+x:
10583+ .word 2
10584+ .text
10585+ la.tls.gd a0,x
10586+ la.tls.ie a1,x
10587+ lui a0,%tls_ie_pcrel_hi(x)
10588+ lw a0,%pcrel_lo(x)(a0)
10589+ add a0,a0,tp
10590+ lw a0,0(a0)
10591+ lui a0,%tprel_hi(x)
10592+ add a0,a0,tp,%tprel_add(x)
10593+ lw a0,%tprel_lo(x)(a0)'
10594+ tls_first_major=2
10595+ tls_first_minor=21
10596+ tls_as_opt='-m32 --fatal-warnings'
10597+ ;;
10598 s390-*-*)
10599 conftest_s='
10600 .section ".tdata","awT",@progbits
10601diff -urN original-gcc/gcc/configure.ac gcc/gcc/configure.ac
10602--- original-gcc/gcc/configure.ac 2014-10-10 14:51:28.000000000 +0200
10603+++ gcc-4.9.2/gcc/configure.ac 2015-03-07 09:57:54.219132741 +0100
10604@@ -3178,6 +3178,25 @@
10605 tls_first_minor=14
10606 tls_as_opt="-a32 --fatal-warnings"
10607 ;;
10608+ riscv*-*-*)
10609+ conftest_s='
10610+ .section .tdata,"awT",@progbits
10611+x:
10612+ .word 2
10613+ .text
10614+ la.tls.gd a0,x
10615+ la.tls.ie a1,x
10616+ lui a0,%tls_ie_pcrel_hi(x)
10617+ lw a0,%pcrel_lo(x)(a0)
10618+ add a0,a0,tp
10619+ lw a0,0(a0)
10620+ lui a0,%tprel_hi(x)
10621+ add a0,a0,tp,%tprel_add(x)
10622+ lw a0,%tprel_lo(x)(a0)'
10623+ tls_first_major=2
10624+ tls_first_minor=21
10625+ tls_as_opt='-m32 --fatal-warnings'
10626+ ;;
10627 s390-*-*)
10628 conftest_s='
10629 .section ".tdata","awT",@progbits
10630diff -urN original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
10631--- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c 2013-12-31 08:05:35.000000000 +0100
10632+++ gcc-4.9.2/gcc/testsuite/gcc.c-torture/execute/20101011-1.c 2015-03-07 09:57:54.223132741 +0100
10633@@ -6,6 +6,9 @@
10634 #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
10635 /* On PPC division by zero does not trap. */
10636 # define DO_TEST 0
10637+#elif defined (__riscv__)
10638+ /* On RISC-V division by zero does not trap. */
10639+# define DO_TEST 0
10640 #elif defined (__SPU__)
10641 /* On SPU division by zero does not trap. */
10642 # define DO_TEST 0
10643diff -urN original-gcc/gcc/testsuite/gcc.dg/20020312-2.c gcc/gcc/testsuite/gcc.dg/20020312-2.c
10644--- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c 2013-12-31 08:05:35.000000000 +0100
10645+++ gcc-4.9.2/gcc/testsuite/gcc.dg/20020312-2.c 2015-03-07 09:57:54.223132741 +0100
10646@@ -66,6 +66,8 @@
10647 # else
10648 # define PIC_REG "30"
10649 # endif
10650+#elif defined(__riscv__)
10651+/* No pic register. */
10652 #elif defined(__RX__)
10653 /* No pic register. */
10654 #elif defined(__s390__)
10655diff -urN original-gcc/gcc/testsuite/gcc.dg/20040813-1.c gcc/gcc/testsuite/gcc.dg/20040813-1.c
10656--- original-gcc/gcc/testsuite/gcc.dg/20040813-1.c 2013-12-31 08:05:35.000000000 +0100
10657+++ gcc-4.9.2/gcc/testsuite/gcc.dg/20040813-1.c 2015-03-07 09:57:54.227132741 +0100
10658@@ -2,7 +2,7 @@
10659 /* Contributed by Devang Patel <dpatel@apple.com> */
10660
10661 /* { dg-do compile } */
10662-/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* nios2-*-* *-*-vxworks* } { "*" } { "" } } */
10663+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* riscv*-*-* tile*-*-* nios2-*-* *-*-vxworks* } { "*" } { "" } } */
10664 /* { dg-options "-gstabs" } */
10665
10666 int
10667diff -urN original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
10668--- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c 2013-12-31 08:05:35.000000000 +0100
10669+++ gcc-4.9.2/gcc/testsuite/gcc.dg/stack-usage-1.c 2015-03-07 09:57:54.227132741 +0100
10670@@ -61,6 +61,8 @@
10671 # else
10672 # define SIZE 240
10673 # endif
10674+#elif defined (__riscv__)
10675+# define SIZE 240
10676 #elif defined (__AVR__)
10677 # define SIZE 254
10678 #elif defined (__s390x__)
10679diff -urN original-gcc/libatomic/cas_n.c gcc/libatomic/cas_n.c
10680--- original-gcc/libatomic/cas_n.c 2014-02-20 18:43:53.000000000 +0100
10681+++ gcc-4.9.2/libatomic/cas_n.c 2015-03-07 09:57:54.227132741 +0100
10682@@ -70,7 +70,7 @@
10683 mask = -1;
10684 }
10685
10686- weval = *eptr << shift;
10687+ weval = (UWORD)*eptr << shift;
10688 wnewval = (UWORD)newval << shift;
10689 woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
10690 do
10691diff -urN original-gcc/libatomic/configure.tgt gcc/libatomic/configure.tgt
10692--- original-gcc/libatomic/configure.tgt 2014-01-02 23:24:30.000000000 +0100
10693+++ gcc-4.9.2/libatomic/configure.tgt 2015-03-07 09:57:54.227132741 +0100
10694@@ -29,6 +29,7 @@
10695 case "${target_cpu}" in
10696 alpha*) ARCH=alpha ;;
10697 rs6000 | powerpc*) ARCH=powerpc ;;
10698+ riscv*) ARCH=riscv ;;
10699 sh*) ARCH=sh ;;
10700
10701 arm*)
10702diff -urN original-gcc/libatomic/fop_n.c gcc/libatomic/fop_n.c
10703--- original-gcc/libatomic/fop_n.c 2014-01-02 23:24:30.000000000 +0100
10704+++ gcc-4.9.2/libatomic/fop_n.c 2015-03-07 09:57:54.231132741 +0100
10705@@ -1,4 +1,4 @@
10706-/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
10707+/* Copyright (C) 2012-2015 Free Software Foundation, Inc.
10708 Contributed by Richard Henderson <rth@redhat.com>.
10709
10710 This file is part of the GNU Atomic Library (libatomic).
10711@@ -112,9 +112,9 @@
10712
10713 pre_barrier (smodel);
10714
10715- wptr = (UWORD *)mptr;
10716- shift = 0;
10717- mask = -1;
10718+ wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
10719+ shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
10720+ mask = SIZE(MASK) << shift;
10721
10722 wopval = (UWORD)opval << shift;
10723 woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
10724@@ -136,9 +136,9 @@
10725
10726 pre_barrier (smodel);
10727
10728- wptr = (UWORD *)mptr;
10729- shift = 0;
10730- mask = -1;
10731+ wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
10732+ shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
10733+ mask = SIZE(MASK) << shift;
10734
10735 wopval = (UWORD)opval << shift;
10736 woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
10737diff -urN original-gcc/libcpp/configure gcc/libcpp/configure
10738--- original-gcc/libcpp/configure 2014-10-30 09:28:58.000000000 +0100
10739+++ gcc-4.9.2/libcpp/configure 2015-03-07 09:57:54.231132741 +0100
10740@@ -7163,6 +7163,7 @@
10741 mips*-*-* | \
10742 mmix-*-* | \
10743 powerpc*-*-* | \
10744+ riscv*-*-* | \
10745 rs6000*-*-* | \
10746 s390*-*-* | \
10747 sparc*-*-* | \
10748diff -urN original-gcc/libcpp/configure.ac gcc/libcpp/configure.ac
10749--- original-gcc/libcpp/configure.ac 2014-02-24 16:08:00.000000000 +0100
10750+++ gcc-4.9.2/libcpp/configure.ac 2015-03-07 09:57:54.235132741 +0100
10751@@ -192,6 +192,7 @@
10752 mips*-*-* | \
10753 mmix-*-* | \
10754 powerpc*-*-* | \
10755+ riscv*-*-* | \
10756 rs6000*-*-* | \
10757 s390*-*-* | \
10758 sparc*-*-* | \
10759diff -urN original-gcc/libgcc/config/riscv/crti.S gcc/libgcc/config/riscv/crti.S
10760--- original-gcc/libgcc/config/riscv/crti.S 1970-01-01 01:00:00.000000000 +0100
10761+++ gcc-4.9.2/libgcc/config/riscv/crti.S 2015-03-07 09:51:45.667139025 +0100
10762@@ -0,0 +1 @@
10763+/* crti.S is empty because .init_array/.fini_array are used exclusively. */
10764diff -urN original-gcc/libgcc/config/riscv/crtn.S gcc/libgcc/config/riscv/crtn.S
10765--- original-gcc/libgcc/config/riscv/crtn.S 1970-01-01 01:00:00.000000000 +0100
10766+++ gcc-4.9.2/libgcc/config/riscv/crtn.S 2015-03-07 09:51:45.667139025 +0100
10767@@ -0,0 +1 @@
10768+/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
10769diff -urN original-gcc/libgcc/config/riscv/div.S gcc/libgcc/config/riscv/div.S
10770--- original-gcc/libgcc/config/riscv/div.S 1970-01-01 01:00:00.000000000 +0100
10771+++ gcc-4.9.2/libgcc/config/riscv/div.S 2015-03-07 09:51:45.667139025 +0100
10772@@ -0,0 +1,121 @@
10773+ .text
10774+ .align 2
10775+
10776+#ifndef __riscv64
10777+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
10778+# define __udivdi3 __udivsi3
10779+# define __umoddi3 __umodsi3
10780+# define __divdi3 __divsi3
10781+# define __moddi3 __modsi3
10782+#else
10783+ .globl __udivsi3
10784+__udivsi3:
10785+ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
10786+ sll a0, a0, 32
10787+ sll a1, a1, 32
10788+ move t0, ra
10789+ jal __udivdi3
10790+ sext.w a0, a0
10791+ jr t0
10792+
10793+ .globl __umodsi3
10794+__umodsi3:
10795+ /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */
10796+ sll a0, a0, 32
10797+ sll a1, a1, 32
10798+ srl a0, a0, 32
10799+ srl a1, a1, 32
10800+ move t0, ra
10801+ jal __udivdi3
10802+ sext.w a0, a1
10803+ jr t0
10804+
10805+ .globl __modsi3
10806+ __modsi3 = __moddi3
10807+
10808+ .globl __divsi3
10809+__divsi3:
10810+ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
10811+ li t0, -1
10812+ beq a1, t0, .L20
10813+#endif
10814+
10815+ .globl __divdi3
10816+__divdi3:
10817+ bltz a0, .L10
10818+ bltz a1, .L11
10819+ /* Since the quotient is positive, fall into __udivdi3. */
10820+
10821+ .globl __udivdi3
10822+__udivdi3:
10823+ mv a2, a1
10824+ mv a1, a0
10825+ li a0, -1
10826+ beqz a2, .L5
10827+ li a3, 1
10828+ bgeu a2, a1, .L2
10829+.L1:
10830+ blez a2, .L2
10831+ slli a2, a2, 1
10832+ slli a3, a3, 1
10833+ bgtu a1, a2, .L1
10834+.L2:
10835+ li a0, 0
10836+.L3:
10837+ bltu a1, a2, .L4
10838+ sub a1, a1, a2
10839+ or a0, a0, a3
10840+.L4:
10841+ srli a3, a3, 1
10842+ srli a2, a2, 1
10843+ bnez a3, .L3
10844+.L5:
10845+ ret
10846+
10847+ .globl __umoddi3
10848+__umoddi3:
10849+ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
10850+ move t0, ra
10851+ jal __udivdi3
10852+ move a0, a1
10853+ jr t0
10854+
10855+ /* Handle negative arguments to __divdi3. */
10856+.L10:
10857+ neg a0, a0
10858+ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
10859+ neg a1, a1
10860+ j __divdi3 /* Compute __udivdi3(-a0, -a1). */
10861+.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
10862+ neg a1, a1
10863+.L12:
10864+ move t0, ra
10865+ jal __divdi3
10866+ neg a0, a0
10867+ jr t0
10868+
10869+ .globl __moddi3
10870+__moddi3:
10871+ move t0, ra
10872+ bltz a1, .L31
10873+ bltz a0, .L32
10874+.L30:
10875+ jal __udivdi3 /* The dividend is not negative. */
10876+ move a0, a1
10877+ jr t0
10878+.L31:
10879+ neg a1, a1
10880+ bgez a0, .L30
10881+.L32:
10882+ neg a0, a0
10883+ jal __udivdi3 /* The dividend is negative. */
10884+ neg a0, a1
10885+ jr t0
10886+
10887+#ifdef __riscv64
10888+ /* continuation of __divsi3 */
10889+.L20:
10890+ sll t0, t0, 31
10891+ bne a0, t0, __divdi3
10892+ ret
10893+#endif
10894diff -urN original-gcc/libgcc/config/riscv/mul.S gcc/libgcc/config/riscv/mul.S
10895--- original-gcc/libgcc/config/riscv/mul.S 1970-01-01 01:00:00.000000000 +0100
10896+++ gcc-4.9.2/libgcc/config/riscv/mul.S 2015-03-07 09:51:45.667139025 +0100
10897@@ -0,0 +1,21 @@
10898+ .text
10899+ .align 2
10900+
10901+#ifndef __riscv64
10902+/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
10903+# define __muldi3 __mulsi3
10904+#endif
10905+
10906+ .globl __muldi3
10907+__muldi3:
10908+ mv a2, a0
10909+ li a0, 0
10910+.L1:
10911+ slli a3, a1, _RISCV_SZPTR-1
10912+ bgez a3, .L2
10913+ add a0, a0, a2
10914+.L2:
10915+ srli a1, a1, 1
10916+ slli a2, a2, 1
10917+ bnez a1, .L1
10918+ ret
10919diff -urN original-gcc/libgcc/config/riscv/riscv-fp.c gcc/libgcc/config/riscv/riscv-fp.c
10920--- original-gcc/libgcc/config/riscv/riscv-fp.c 1970-01-01 01:00:00.000000000 +0100
10921+++ gcc-4.9.2/libgcc/config/riscv/riscv-fp.c 2015-03-07 09:51:45.667139025 +0100
10922@@ -0,0 +1,178 @@
10923+/* Functions needed for soft-float on riscv-linux. Based on
10924+ rs6000/ppc64-fp.c with TF types removed.
10925+
10926+ Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
10927+ 2000, 2001, 2002, 2003, 2004, 2006, 2009 Free Software Foundation,
10928+ Inc.
10929+
10930+This file is part of GCC.
10931+
10932+GCC is free software; you can redistribute it and/or modify it under
10933+the terms of the GNU General Public License as published by the Free
10934+Software Foundation; either version 3, or (at your option) any later
10935+version.
10936+
10937+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
10938+WARRANTY; without even the implied warranty of MERCHANTABILITY or
10939+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
10940+for more details.
10941+
10942+Under Section 7 of GPL version 3, you are granted additional
10943+permissions described in the GCC Runtime Library Exception, version
10944+3.1, as published by the Free Software Foundation.
10945+
10946+You should have received a copy of the GNU General Public License and
10947+a copy of the GCC Runtime Library Exception along with this program;
10948+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
10949+<http://www.gnu.org/licenses/>. */
10950+
10951+#if defined(__riscv64)
10952+#include "fp-bit.h"
10953+
10954+extern DItype __fixdfdi (DFtype);
10955+extern DItype __fixsfdi (SFtype);
10956+extern USItype __fixunsdfsi (DFtype);
10957+extern USItype __fixunssfsi (SFtype);
10958+extern DFtype __floatdidf (DItype);
10959+extern DFtype __floatundidf (UDItype);
10960+extern SFtype __floatdisf (DItype);
10961+extern SFtype __floatundisf (UDItype);
10962+
10963+static DItype local_fixunssfdi (SFtype);
10964+static DItype local_fixunsdfdi (DFtype);
10965+
10966+DItype
10967+__fixdfdi (DFtype a)
10968+{
10969+ if (a < 0)
10970+ return - local_fixunsdfdi (-a);
10971+ return local_fixunsdfdi (a);
10972+}
10973+
10974+DItype
10975+__fixsfdi (SFtype a)
10976+{
10977+ if (a < 0)
10978+ return - local_fixunssfdi (-a);
10979+ return local_fixunssfdi (a);
10980+}
10981+
10982+USItype
10983+__fixunsdfsi (DFtype a)
10984+{
10985+ if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
10986+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
10987+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
10988+ return (SItype) a;
10989+}
10990+
10991+USItype
10992+__fixunssfsi (SFtype a)
10993+{
10994+ if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
10995+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
10996+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
10997+ return (SItype) a;
10998+}
10999+
11000+DFtype
11001+__floatdidf (DItype u)
11002+{
11003+ DFtype d;
11004+
11005+ d = (SItype) (u >> (sizeof (SItype) * 8));
11006+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
11007+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
11008+
11009+ return d;
11010+}
11011+
11012+DFtype
11013+__floatundidf (UDItype u)
11014+{
11015+ DFtype d;
11016+
11017+ d = (USItype) (u >> (sizeof (SItype) * 8));
11018+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
11019+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
11020+
11021+ return d;
11022+}
11023+
11024+SFtype
11025+__floatdisf (DItype u)
11026+{
11027+ DFtype f;
11028+
11029+ if (53 < (sizeof (DItype) * 8)
11030+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
11031+ {
11032+ if (! (- ((DItype) 1 << 53) < u
11033+ && u < ((DItype) 1 << 53)))
11034+ {
11035+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
11036+ {
11037+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
11038+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
11039+ }
11040+ }
11041+ }
11042+ f = (SItype) (u >> (sizeof (SItype) * 8));
11043+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
11044+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
11045+
11046+ return (SFtype) f;
11047+}
11048+
11049+SFtype
11050+__floatundisf (UDItype u)
11051+{
11052+ DFtype f;
11053+
11054+ if (53 < (sizeof (DItype) * 8)
11055+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
11056+ {
11057+ if (u >= ((UDItype) 1 << 53))
11058+ {
11059+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
11060+ {
11061+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
11062+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
11063+ }
11064+ }
11065+ }
11066+ f = (USItype) (u >> (sizeof (SItype) * 8));
11067+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
11068+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
11069+
11070+ return (SFtype) f;
11071+}
11072+
11073+/* This version is needed to prevent recursion; fixunsdfdi in libgcc
11074+ calls fixdfdi, which in turn calls fixunsdfdi. */
11075+
11076+static DItype
11077+local_fixunsdfdi (DFtype a)
11078+{
11079+ USItype hi, lo;
11080+
11081+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
11082+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
11083+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
11084+}
11085+
11086+/* This version is needed to prevent recursion; fixunssfdi in libgcc
11087+ calls fixsfdi, which in turn calls fixunssfdi. */
11088+
11089+static DItype
11090+local_fixunssfdi (SFtype original_a)
11091+{
11092+ DFtype a = original_a;
11093+ USItype hi, lo;
11094+
11095+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
11096+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
11097+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
11098+}
11099+
11100+#endif
11101diff -urN original-gcc/libgcc/config/riscv/t-dpbit gcc/libgcc/config/riscv/t-dpbit
11102--- original-gcc/libgcc/config/riscv/t-dpbit 1970-01-01 01:00:00.000000000 +0100
11103+++ gcc-4.9.2/libgcc/config/riscv/t-dpbit 2015-03-07 09:51:45.667139025 +0100
11104@@ -0,0 +1,4 @@
11105+LIB2ADD += dp-bit.c
11106+
11107+dp-bit.c: $(srcdir)/fp-bit.c
11108+ cat $(srcdir)/fp-bit.c > dp-bit.c
11109diff -urN original-gcc/libgcc/config/riscv/t-elf gcc/libgcc/config/riscv/t-elf
11110--- original-gcc/libgcc/config/riscv/t-elf 1970-01-01 01:00:00.000000000 +0100
11111+++ gcc-4.9.2/libgcc/config/riscv/t-elf 2015-03-07 09:51:45.667139025 +0100
11112@@ -0,0 +1,3 @@
11113+LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
11114+ $(srcdir)/config/riscv/mul.S \
11115+ $(srcdir)/config/riscv/div.S
11116diff -urN original-gcc/libgcc/config/riscv/t-fpbit gcc/libgcc/config/riscv/t-fpbit
11117--- original-gcc/libgcc/config/riscv/t-fpbit 1970-01-01 01:00:00.000000000 +0100
11118+++ gcc-4.9.2/libgcc/config/riscv/t-fpbit 2015-03-07 09:51:45.667139025 +0100
11119@@ -0,0 +1,5 @@
11120+LIB2ADD += fp-bit.c
11121+
11122+fp-bit.c: $(srcdir)/fp-bit.c
11123+ echo '#define FLOAT' > fp-bit.c
11124+ cat $(srcdir)/fp-bit.c >> fp-bit.c
11125diff -urN original-gcc/libgcc/config/riscv/t-linux gcc/libgcc/config/riscv/t-linux
11126--- original-gcc/libgcc/config/riscv/t-linux 1970-01-01 01:00:00.000000000 +0100
11127+++ gcc-4.9.2/libgcc/config/riscv/t-linux 2015-03-07 09:51:45.667139025 +0100
11128@@ -0,0 +1,3 @@
11129+LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
11130+ $(srcdir)/config/riscv/mul.S \
11131+ $(srcdir)/config/riscv/div.S
11132diff -urN original-gcc/libgcc/config/riscv/t-linux32 gcc/libgcc/config/riscv/t-linux32
11133--- original-gcc/libgcc/config/riscv/t-linux32 1970-01-01 01:00:00.000000000 +0100
11134+++ gcc-4.9.2/libgcc/config/riscv/t-linux32 2015-03-07 09:51:45.667139025 +0100
11135@@ -0,0 +1,2 @@
11136+HOST_LIBGCC2_CFLAGS += -m32
11137+CRTSTUFF_CFLAGS += -m32
11138diff -urN original-gcc/libgcc/config/riscv/t-tpbit gcc/libgcc/config/riscv/t-tpbit
11139--- original-gcc/libgcc/config/riscv/t-tpbit 1970-01-01 01:00:00.000000000 +0100
11140+++ gcc-4.9.2/libgcc/config/riscv/t-tpbit 2015-03-07 09:51:45.667139025 +0100
11141@@ -0,0 +1,10 @@
11142+LIB2ADD += tp-bit.c
11143+
11144+tp-bit.c: $(srcdir)/fp-bit.c
11145+ echo '#ifdef _RISCVEL' > tp-bit.c
11146+ echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c
11147+ echo '#endif' >> tp-bit.c
11148+ echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c
11149+ echo '# define TFLOAT' >> tp-bit.c
11150+ cat $(srcdir)/fp-bit.c >> tp-bit.c
11151+ echo '#endif' >> tp-bit.c
11152diff -urN original-gcc/libgcc/config.host gcc/libgcc/config.host
11153--- original-gcc/libgcc/config.host 2014-03-27 16:40:31.000000000 +0100
11154+++ gcc-4.9.2/libgcc/config.host 2015-03-07 09:57:54.235132741 +0100
11155@@ -167,6 +167,9 @@
11156 ;;
11157 rs6000*-*-*)
11158 ;;
11159+riscv*)
11160+ cpu_type=riscv
11161+ ;;
11162 score*-*-*)
11163 cpu_type=score
11164 ;;
11165@@ -1002,6 +1005,18 @@
11166 tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
11167 extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
11168 ;;
11169+riscv32*-*-linux*)
11170+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-linux riscv/t-linux32"
11171+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
11172+ ;;
11173+riscv*-*-linux*)
11174+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-linux"
11175+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
11176+ ;;
11177+riscv*-*-*)
11178+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-elf"
11179+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
11180+ ;;
11181 rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
11182 md_unwind_header=rs6000/aix-unwind.h
11183 tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
11184diff -urN original-gcc/libsanitizer/asan/asan_linux.cc gcc/libsanitizer/asan/asan_linux.cc
11185--- original-gcc/libsanitizer/asan/asan_linux.cc 2013-12-05 10:18:38.000000000 +0100
11186+++ gcc-4.9.2/libsanitizer/asan/asan_linux.cc 2015-03-07 09:57:54.235132741 +0100
11187@@ -98,6 +98,11 @@
11188 *pc = ucontext->uc_mcontext.gregs[31];
11189 *bp = ucontext->uc_mcontext.gregs[30];
11190 *sp = ucontext->uc_mcontext.gregs[29];
11191+# elif defined(__riscv__)
11192+ ucontext_t *ucontext = (ucontext_t*)context;
11193+ *pc = ucontext->uc_mcontext.gregs[REG_PC];
11194+ *bp = ucontext->uc_mcontext.gregs[REG_S0];
11195+ *sp = ucontext->uc_mcontext.gregs[REG_SP];
11196 #else
11197 # error "Unsupported arch"
11198 #endif
11199diff -urN original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc
11200--- original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc 2013-12-05 10:18:38.000000000 +0100
11201+++ gcc-4.9.2/libsanitizer/sanitizer_common/sanitizer_linux.cc 2015-03-07 09:57:54.239132741 +0100
11202@@ -93,11 +93,11 @@
11203 }
11204
11205 uptr internal_open(const char *filename, int flags) {
11206- return internal_syscall(__NR_open, (uptr)filename, flags);
11207+ return internal_syscall(__NR_openat, AT_FDCWD, (uptr)filename, flags);
11208 }
11209
11210 uptr internal_open(const char *filename, int flags, u32 mode) {
11211- return internal_syscall(__NR_open, (uptr)filename, flags, mode);
11212+ return internal_syscall(__NR_openat, AT_FDCWD, (uptr)filename, flags, mode);
11213 }
11214
11215 uptr OpenFile(const char *filename, bool write) {
11216@@ -139,7 +139,7 @@
11217
11218 uptr internal_stat(const char *path, void *buf) {
11219 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
11220- return internal_syscall(__NR_stat, (uptr)path, (uptr)buf);
11221+ return internal_syscall(__NR_newfstatat, AT_FDCWD, (uptr)path, (uptr)buf, 0);
11222 #else
11223 struct stat64 buf64;
11224 int res = internal_syscall(__NR_stat64, path, &buf64);
11225@@ -150,7 +150,7 @@
11226
11227 uptr internal_lstat(const char *path, void *buf) {
11228 #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
11229- return internal_syscall(__NR_lstat, (uptr)path, (uptr)buf);
11230+ return internal_syscall(__NR_newfstatat, AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW);
11231 #else
11232 struct stat64 buf64;
11233 int res = internal_syscall(__NR_lstat64, path, &buf64);
11234@@ -178,15 +178,15 @@
11235 }
11236
11237 uptr internal_dup2(int oldfd, int newfd) {
11238- return internal_syscall(__NR_dup2, oldfd, newfd);
11239+ return internal_syscall(__NR_dup3, oldfd, newfd, 0);
11240 }
11241
11242 uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
11243- return internal_syscall(__NR_readlink, (uptr)path, (uptr)buf, bufsize);
11244+ return internal_syscall(__NR_readlinkat, AT_FDCWD, (uptr)path, (uptr)buf, bufsize);
11245 }
11246
11247 uptr internal_unlink(const char *path) {
11248- return internal_syscall(__NR_unlink, (uptr)path);
11249+ return internal_syscall(__NR_unlinkat, AT_FDCWD, (uptr)path);
11250 }
11251
11252 uptr internal_sched_yield() {
11253@@ -588,7 +588,7 @@
11254 }
11255
11256 uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
11257- return internal_syscall(__NR_getdents, fd, (uptr)dirp, count);
11258+ return internal_syscall(__NR_getdents64, fd, (uptr)dirp, count);
11259 }
11260
11261 uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
11262diff -urN original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
11263--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc 2014-10-14 21:26:42.000000000 +0200
11264+++ gcc-4.9.2/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc 2015-03-07 09:57:54.235132741 +0100
11265@@ -63,7 +63,7 @@
11266 unsigned struct_statfs64_sz = sizeof(struct statfs64);
11267 } // namespace __sanitizer
11268
11269-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__sparc__)
11270+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__sparc__) && !defined(__riscv__)
11271 COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
11272 #endif
11273
11274diff -urN original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
11275--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2014-10-14 21:26:42.000000000 +0200
11276+++ gcc-4.9.2/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2015-03-07 09:57:54.239132741 +0100
11277@@ -67,6 +67,10 @@
11278 const unsigned struct___old_kernel_stat_sz = 0;
11279 const unsigned struct_kernel_stat_sz = 144;
11280 const unsigned struct_kernel_stat64_sz = 104;
11281+#elif defined(__riscv__)
11282+ const unsigned struct___old_kernel_stat_sz = 0;
11283+ const unsigned struct_kernel_stat_sz = 128;
11284+ const unsigned struct_kernel_stat64_sz = 128;
11285 #elif defined(__sparc__) && defined(__arch64__)
11286 const unsigned struct___old_kernel_stat_sz = 0;
11287 const unsigned struct_kernel_stat_sz = 104;
11288@@ -367,7 +371,7 @@
11289 typedef long __sanitizer___kernel_off_t;
11290 #endif
11291
11292-#if defined(__powerpc__)
11293+#if defined(__powerpc__) || defined(__riscv__)
11294 typedef unsigned int __sanitizer___kernel_old_uid_t;
11295 typedef unsigned int __sanitizer___kernel_old_gid_t;
11296 #else