computing-offload/generic_vdpa/qemu/sw_64-Add-sw64-architecture-support.patch
jiangdongxu 79c4324644 add generic_vdpa basecode
Change-Id: I2d302dda68298877c65c99147f5bf22186a59aac
2024-09-19 17:19:46 +08:00

18278 lines
562 KiB
Diff

From d74713d8e656e8d0f7a5200122119802e639ff49 Mon Sep 17 00:00:00 2001
From: Lu Feifei <lufeifei@wxiat.com>
Date: Mon, 14 Mar 2022 11:04:02 +0800
Subject: [PATCH] sw_64: Add sw64 architecture support
Signed-off-by: Lu Feifei <lufeifei@wxiat.com>
---
configs/devices/sw64-softmmu/default.mak | 10 +
configs/targets/sw64-softmmu.mak | 8 +
configure | 7 +
disas.c | 2 +
disas/meson.build | 1 +
disas/sw64.c | 1242 +++++++
hw/Kconfig | 1 +
hw/meson.build | 1 +
hw/rtc/sun4v-rtc.c | 11 +
hw/sw64/Kconfig | 11 +
hw/sw64/Makefile.objs | 1 +
hw/sw64/core.h | 25 +
hw/sw64/core3.c | 182 ++
hw/sw64/core3_board.c | 493 +++
hw/sw64/meson.build | 10 +
hw/sw64/sw64_iommu.c | 567 ++++
hw/sw64/trace-events | 3 +
include/disas/dis-asm.h | 4 +
include/elf.h | 44 +
include/hw/sw64/sw64_iommu.h | 105 +
include/qemu/atomic.h | 2 +
include/qemu/timer.h | 10 +
include/sysemu/arch_init.h | 1 +
linux-headers/asm-sw64/kvm.h | 122 +
linux-headers/asm-sw64/unistd.h | 380 +++
linux-user/meson.build | 1 +
linux-user/sw64/cpu_loop.c | 108 +
linux-user/sw64/signal.c | 273 ++
linux-user/sw64/sockbits.h | 1 +
linux-user/sw64/syscall_nr.h | 471 +++
linux-user/sw64/target_cpu.h | 38 +
linux-user/sw64/target_elf.h | 14 +
linux-user/sw64/target_fcntl.h | 11 +
linux-user/sw64/target_signal.h | 98 +
linux-user/sw64/target_structs.h | 47 +
linux-user/sw64/target_syscall.h | 121 +
linux-user/sw64/termbits.h | 265 ++
meson.build | 12 +-
pc-bios/core3-hmcode | Bin 0 -> 225904 bytes
pc-bios/core3-reset | Bin 0 -> 5032 bytes
pc-bios/core4-hmcode | Bin 0 -> 243040 bytes
pc-bios/meson.build | 3 +
pc-bios/uefi-bios-sw | Bin 0 -> 3145728 bytes
qapi/machine.json | 2 +-
softmmu/qdev-monitor.c | 3 +-
target/Kconfig | 1 +
target/meson.build | 1 +
target/sw64/Kconfig | 2 +
target/sw64/Makefile.objs | 4 +
target/sw64/cpu-param.h | 24 +
target/sw64/cpu-qom.h | 47 +
target/sw64/cpu.c | 457 +++
target/sw64/cpu.h | 406 +++
target/sw64/exception.c | 76 +
target/sw64/float_helper.c | 846 +++++
target/sw64/helper.c | 349 ++
target/sw64/helper.h | 127 +
target/sw64/int_helper.c | 118 +
target/sw64/kvm.c | 215 ++
target/sw64/kvm_sw64.h | 47 +
target/sw64/machine.c | 18 +
target/sw64/meson.build | 19 +
target/sw64/profile.c | 2342 +++++++++++++
target/sw64/profile.h | 541 +++
target/sw64/simd_helper.c | 1058 ++++++
target/sw64/translate.c | 3798 ++++++++++++++++++++++
target/sw64/translate.h | 60 +
tcg/sw64/tcg-target-con-set.h | 39 +
tcg/sw64/tcg-target-con-str.h | 28 +
tcg/sw64/tcg-target.c.inc | 2109 ++++++++++++
tcg/sw64/tcg-target.h | 123 +
tcg/sw64/tcg-target.opc.h | 15 +
72 files changed, 17578 insertions(+), 3 deletions(-)
create mode 100644 configs/devices/sw64-softmmu/default.mak
create mode 100644 configs/targets/sw64-softmmu.mak
create mode 100755 disas/sw64.c
create mode 100644 hw/sw64/Kconfig
create mode 100644 hw/sw64/Makefile.objs
create mode 100644 hw/sw64/core.h
create mode 100644 hw/sw64/core3.c
create mode 100644 hw/sw64/core3_board.c
create mode 100644 hw/sw64/meson.build
create mode 100644 hw/sw64/sw64_iommu.c
create mode 100644 hw/sw64/trace-events
create mode 100644 include/hw/sw64/sw64_iommu.h
create mode 100644 linux-headers/asm-sw64/kvm.h
create mode 100644 linux-headers/asm-sw64/unistd.h
create mode 100644 linux-user/sw64/cpu_loop.c
create mode 100644 linux-user/sw64/signal.c
create mode 100644 linux-user/sw64/sockbits.h
create mode 100644 linux-user/sw64/syscall_nr.h
create mode 100644 linux-user/sw64/target_cpu.h
create mode 100644 linux-user/sw64/target_elf.h
create mode 100644 linux-user/sw64/target_fcntl.h
create mode 100644 linux-user/sw64/target_signal.h
create mode 100644 linux-user/sw64/target_structs.h
create mode 100644 linux-user/sw64/target_syscall.h
create mode 100644 linux-user/sw64/termbits.h
create mode 100755 pc-bios/core3-hmcode
create mode 100755 pc-bios/core3-reset
create mode 100755 pc-bios/core4-hmcode
create mode 100755 pc-bios/uefi-bios-sw
create mode 100644 target/sw64/Kconfig
create mode 100644 target/sw64/Makefile.objs
create mode 100644 target/sw64/cpu-param.h
create mode 100644 target/sw64/cpu-qom.h
create mode 100644 target/sw64/cpu.c
create mode 100644 target/sw64/cpu.h
create mode 100644 target/sw64/exception.c
create mode 100644 target/sw64/float_helper.c
create mode 100644 target/sw64/helper.c
create mode 100644 target/sw64/helper.h
create mode 100644 target/sw64/int_helper.c
create mode 100644 target/sw64/kvm.c
create mode 100644 target/sw64/kvm_sw64.h
create mode 100644 target/sw64/machine.c
create mode 100644 target/sw64/meson.build
create mode 100644 target/sw64/profile.c
create mode 100644 target/sw64/profile.h
create mode 100644 target/sw64/simd_helper.c
create mode 100644 target/sw64/translate.c
create mode 100644 target/sw64/translate.h
create mode 100755 tcg/sw64/tcg-target-con-set.h
create mode 100755 tcg/sw64/tcg-target-con-str.h
create mode 100755 tcg/sw64/tcg-target.c.inc
create mode 100755 tcg/sw64/tcg-target.h
create mode 100755 tcg/sw64/tcg-target.opc.h
diff --git a/configs/devices/sw64-softmmu/default.mak b/configs/devices/sw64-softmmu/default.mak
new file mode 100644
index 0000000000..0b4d56b43e
--- /dev/null
+++ b/configs/devices/sw64-softmmu/default.mak
@@ -0,0 +1,10 @@
+# Default configuration for sw64-softmmu
+
+# Uncomment the following lines to disable these optional devices:
+#
+#CONFIG_PCI_DEVICES=n
+#CONFIG_TEST_DEVICES=n
+
+# Boards:
+#
+CONFIG_CORE3=y
diff --git a/configs/targets/sw64-softmmu.mak b/configs/targets/sw64-softmmu.mak
new file mode 100644
index 0000000000..37cc2e05a6
--- /dev/null
+++ b/configs/targets/sw64-softmmu.mak
@@ -0,0 +1,8 @@
+# Default configuration for sw64-softmmu
+
+# Boards:
+#
+TARGET_ARCH=sw64
+TARGET_BASE_ARCH=sw64
+TARGET_ABI_DIR=sw64
+TARGET_SUPPORTS_MTTCG=y
diff --git a/configure b/configure
index 48c21775f3..9569d7a3d0 100755
--- a/configure
+++ b/configure
@@ -612,6 +612,9 @@ case "$cpu" in
sparc|sun4[cdmuv])
cpu="sparc"
;;
+ sw_64)
+ cpu="sw64"
+ ;;
*)
# This will result in either an error or falling back to TCI later
ARCH=unknown
@@ -3268,6 +3271,10 @@ alpha)
# Ensure there's only a single GP
QEMU_CFLAGS="-msmall-data $QEMU_CFLAGS"
;;
+sw*)
+ # Ensure there's only a single GP
+ QEMU_CFLAGS="-msmall-data $QEMU_CFLAGS"
+;;
esac
if test "$gprof" = "yes" ; then
diff --git a/disas.c b/disas.c
index 3dab4482d1..897de1d9a9 100644
--- a/disas.c
+++ b/disas.c
@@ -207,6 +207,8 @@ static void initialize_debug_host(CPUDebug *s)
s->info.cap_insn_split = 6;
#elif defined(__hppa__)
s->info.print_insn = print_insn_hppa;
+#elif defined(__sw_64__)
+ s->info.print_insn = print_insn_sw_64;
#endif
}
diff --git a/disas/meson.build b/disas/meson.build
index 449f99e1de..5c5daa69a7 100644
--- a/disas/meson.build
+++ b/disas/meson.build
@@ -20,4 +20,5 @@ common_ss.add(when: 'CONFIG_S390_DIS', if_true: files('s390.c'))
common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c'))
common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c'))
common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c'))
+common_ss.add(when: 'CONFIG_SW64_DIS', if_true: files('sw64.c'))
common_ss.add(when: capstone, if_true: files('capstone.c'))
diff --git a/disas/sw64.c b/disas/sw64.c
new file mode 100755
index 0000000000..c5bd578e07
--- /dev/null
+++ b/disas/sw64.c
@@ -0,0 +1,1242 @@
+/*
+ * sw_64-dis.c -- Disassemble Sw_64 CORE3 instructions
+ *
+ * This file is part of libopcodes.
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3, or (at your option)
+ * any later version.
+ *
+ * It is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; see the file COPYING. If not, write to the Free
+ * Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include "qemu/osdep.h"
+#include "disas/dis-asm.h"
+
+#undef MAX
+
+struct sw_64_opcode {
+ /* The opcode name. */
+ const char *name;
+
+ /* The opcode itself. Those bits which will be filled in with
+ operands are zeroes. */
+ unsigned opcode;
+
+ /* The opcode mask. This is used by the disassembler. This is a
+ mask containing ones indicating those bits which must match the
+ opcode field, and zeroes indicating those bits which need not
+ match (and are presumably filled in by operands). */
+ unsigned mask;
+
+ /* One bit flags for the opcode. These are primarily used to
+ indicate specific processors and environments support the
+ instructions. The defined values are listed below. */
+ unsigned flags;
+
+ /* An array of operand codes. Each code is an index into the
+ operand table. They appear in the order which the operands must
+ appear in assembly code, and are terminated by a zero. */
+ unsigned char operands[5];
+};
+
+/* The table itself is sorted by major opcode number, and is otherwise
+ in the order in which the disassembler should consider
+ instructions. */
+extern const struct sw_64_opcode sw_64_opcodes[];
+extern const unsigned sw_64_num_opcodes;
+
+/* Values defined for the flags field of a struct sw_64_opcode. */
+
+/* CPU Availability */
+#define SW_OPCODE_BASE 0x0001 /* Base architecture insns. */
+#define SW_OPCODE_CORE3 0x0002 /* Core3 private insns. */
+#define SW_LITOP(i) (((i) >> 26) & 0x3D)
+
+#define SW_OPCODE_NOHM (~(SW_OPCODE_BASE|SW_OPCODE_CORE3))
+
+/* A macro to extract the major opcode from an instruction. */
+#define SW_OP(i) (((i) >> 26) & 0x3F)
+
+/* The total number of major opcodes. */
+#define SW_NOPS 0x40
+
+/* The operands table is an array of struct sw_64_operand. */
+
+struct sw_64_operand {
+ /* The number of bits in the operand. */
+ unsigned int bits : 5;
+
+ /* How far the operand is left shifted in the instruction. */
+ unsigned int shift : 5;
+
+ /* The default relocation type for this operand. */
+ signed int default_reloc : 16;
+
+ /* One bit syntax flags. */
+ unsigned int flags : 16;
+
+ /* Insertion function. This is used by the assembler. To insert an
+ operand value into an instruction, check this field.
+
+ If it is NULL, execute
+ i |= (op & ((1 << o->bits) - 1)) << o->shift;
+ (i is the instruction which we are filling in, o is a pointer to
+ this structure, and op is the opcode value; this assumes twos
+ complement arithmetic).
+
+ If this field is not NULL, then simply call it with the
+ instruction and the operand value. It will return the new value
+ of the instruction. If the ERRMSG argument is not NULL, then if
+ the operand value is illegal, *ERRMSG will be set to a warning
+ string (the operand will be inserted in any case). If the
+ operand value is legal, *ERRMSG will be unchanged (most operands
+ can accept any value). */
+ unsigned (*insert) (unsigned instruction, int op, const char **errmsg);
+
+ /* Extraction function. This is used by the disassembler. To
+ extract this operand type from an instruction, check this field.
+
+ If it is NULL, compute
+ op = ((i) >> o->shift) & ((1 << o->bits) - 1);
+ if ((o->flags & SW_OPERAND_SIGNED) != 0
+ && (op & (1 << (o->bits - 1))) != 0)
+ op -= 1 << o->bits;
+ (i is the instruction, o is a pointer to this structure, and op
+ is the result; this assumes twos complement arithmetic).
+
+ If this field is not NULL, then simply call it with the
+ instruction value. It will return the value of the operand. If
+ the INVALID argument is not NULL, *INVALID will be set to
+ non-zero if this operand type can not actually be extracted from
+ this operand (i.e., the instruction does not match). If the
+ operand is valid, *INVALID will not be changed. */
+ int (*extract) (unsigned instruction, int *invalid);
+};
+
+/* Elements in the table are retrieved by indexing with values from
+ the operands field of the sw_64_opcodes table. */
+
+extern const struct sw_64_operand sw_64_operands[];
+extern const unsigned sw_64_num_operands;
+/* Values defined for the flags field of a struct sw_64_operand. */
+
+/* Mask for selecting the type for typecheck purposes */
+#define SW_OPERAND_TYPECHECK_MASK \
+ (SW_OPERAND_PARENS | SW_OPERAND_COMMA | SW_OPERAND_IR | \
+ SW_OPERAND_FPR | SW_OPERAND_RELATIVE | SW_OPERAND_SIGNED | \
+ SW_OPERAND_UNSIGNED)
+
+/* This operand does not actually exist in the assembler input. This
+ is used to support extended mnemonics, for which two operands fields
+ are identical. The assembler should call the insert function with
+ any op value. The disassembler should call the extract function,
+ ignore the return value, and check the value placed in the invalid
+ argument. */
+#define SW_OPERAND_FAKE 01
+
+/* The operand should be wrapped in parentheses rather than separated
+ from the previous by a comma. This is used for the load and store
+ instructions which want their operands to look like "Ra,disp(Rb)". */
+#define SW_OPERAND_PARENS 02
+
+/* Used in combination with PARENS, this suppresses the suppression of
+ the comma. This is used for "jmp Ra,(Rb),hint". */
+#define SW_OPERAND_COMMA 04
+
+/* This operand names an integer register. */
+#define SW_OPERAND_IR 010
+
+/* This operand names a floating point register. */
+#define SW_OPERAND_FPR 020
+
+/* This operand is a relative branch displacement. The disassembler
+ prints these symbolically if possible. */
+#define SW_OPERAND_RELATIVE 040
+
+/* This operand takes signed values. */
+#define SW_OPERAND_SIGNED 0100
+
+/* This operand takes unsigned values. This exists primarily so that
+ a flags value of 0 can be treated as end-of-arguments. */
+#define SW_OPERAND_UNSIGNED 0200
+
+/* Suppress overflow detection on this field. This is used for hints. */
+#define SW_OPERAND_NOOVERFLOW 0400
+
+/* Mask for optional argument default value. */
+#define SW_OPERAND_OPTIONAL_MASK 07000
+
+/* This operand defaults to zero. This is used for jump hints. */
+#define SW_OPERAND_DEFAULT_ZERO 01000
+
+/* This operand should default to the first (real) operand and is used
+ in conjunction with SW_OPERAND_OPTIONAL. This allows
+ "and $0,3,$0" to be written as "and $0,3", etc. I don't like
+ it, but it's what DEC does. */
+#define SW_OPERAND_DEFAULT_FIRST 02000
+
+/* Similarly, this operand should default to the second (real) operand.
+ This allows "negl $0" instead of "negl $0,$0". */
+#define SW_OPERAND_DEFAULT_SECOND 04000
+
+/* Register common names */
+
+#define SW_REG_V0 0
+#define SW_REG_T0 1
+#define SW_REG_T1 2
+#define SW_REG_T2 3
+#define SW_REG_T3 4
+#define SW_REG_T4 5
+#define SW_REG_T5 6
+#define SW_REG_T6 7
+#define SW_REG_T7 8
+#define SW_REG_S0 9
+#define SW_REG_S1 10
+#define SW_REG_S2 11
+#define SW_REG_S3 12
+#define SW_REG_S4 13
+#define SW_REG_S5 14
+#define SW_REG_FP 15
+#define SW_REG_A0 16
+#define SW_REG_A1 17
+#define SW_REG_A2 18
+#define SW_REG_A3 19
+#define SW_REG_A4 20
+#define SW_REG_A5 21
+#define SW_REG_T8 22
+#define SW_REG_T9 23
+#define SW_REG_T10 24
+#define SW_REG_T11 25
+#define SW_REG_RA 26
+#define SW_REG_PV 27
+#define SW_REG_T12 27
+#define SW_REG_AT 28
+#define SW_REG_GP 29
+#define SW_REG_SP 30
+#define SW_REG_ZERO 31
+
+enum bfd_reloc_code_real {
+ BFD_RELOC_23_PCREL_S2,
+ BFD_RELOC_SW_64_HINT
+};
+
+static unsigned insert_rba(unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | (((insn >> 21) & 0x1f) << 16);
+}
+
+static int extract_rba(unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL
+ && ((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+/* The same for the RC field. */
+static unsigned insert_rca(unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | ((insn >> 21) & 0x1f);
+}
+
+static unsigned insert_rdc(unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | ((insn >> 5) & 0x1f);
+}
+
+static int extract_rdc(unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL
+ && ((insn >> 5) & 0x1f) != (insn & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+static int extract_rca(unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL
+ && ((insn >> 21) & 0x1f) != (insn & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+/* Fake arguments in which the registers must be set to ZERO. */
+static unsigned insert_za(unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | (31 << 21);
+}
+
+static int extract_za(unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+static unsigned insert_zb(unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | (31 << 16);
+}
+
+static int extract_zb(unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 16) & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+static unsigned insert_zc(unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | 31;
+}
+
+static int extract_zc(unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && (insn & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+
+/* The displacement field of a Branch format insn. */
+
+static unsigned insert_bdisp(unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **)NULL && (value & 3))
+ *errmsg = "branch operand unaligned";
+ return insn | ((value / 4) & 0x1FFFFF);
+}
+
+static int extract_bdisp(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0x1FFFFF) ^ 0x100000) - 0x100000);
+}
+
+static unsigned insert_bdisp26(unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **)NULL && (value & 3))
+ *errmsg = "branch operand unaligned";
+ return insn | ((value / 4) & 0x3FFFFFF);
+}
+
+static int extract_bdisp26(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0x3FFFFFF) ^ 0x2000000) - 0x2000000);
+}
+
+/* The hint field of a JMP/JSR insn. */
+/* sw use 16 bits hint disp. */
+static unsigned insert_jhint(unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **)NULL && (value & 3))
+ *errmsg = "jump hint unaligned";
+ return insn | ((value / 4) & 0xFFFF);
+}
+
+static int extract_jhint(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0xFFFF) ^ 0x8000) - 0x8000);
+}
+
+/* The hint field of an CORE3 HW_JMP/JSR insn. */
+
+static unsigned insert_sw4hwjhint(unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **)NULL && (value & 3))
+ *errmsg = "jump hint unaligned";
+ return insn | ((value / 4) & 0x1FFF);
+}
+
+static int extract_sw4hwjhint(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0x1FFF) ^ 0x1000) - 0x1000);
+}
+
+/* The operands table. */
+
+const struct sw_64_operand sw_64_operands[] = {
+ /* The fields are bits, shift, insert, extract, flags */
+ /* The zero index is used to indicate end-of-list */
+#define UNUSED 0
+ { 0, 0, 0, 0, 0, 0 },
+
+ /* The plain integer register fields. */
+#define RA (UNUSED + 1)
+ { 5, 21, 0, SW_OPERAND_IR, 0, 0 },
+#define RB (RA + 1)
+ { 5, 16, 0, SW_OPERAND_IR, 0, 0 },
+#define RC (RB + 1)
+ { 5, 0, 0, SW_OPERAND_IR, 0, 0 },
+
+ /* The plain fp register fields. */
+#define FA (RC + 1)
+ { 5, 21, 0, SW_OPERAND_FPR, 0, 0 },
+#define FB (FA + 1)
+ { 5, 16, 0, SW_OPERAND_FPR, 0, 0 },
+#define FC (FB + 1)
+ { 5, 0, 0, SW_OPERAND_FPR, 0, 0 },
+
+ /* The integer registers when they are ZERO. */
+#define ZA (FC + 1)
+ { 5, 21, 0, SW_OPERAND_FAKE, insert_za, extract_za },
+#define ZB (ZA + 1)
+ { 5, 16, 0, SW_OPERAND_FAKE, insert_zb, extract_zb },
+#define ZC (ZB + 1)
+ { 5, 0, 0, SW_OPERAND_FAKE, insert_zc, extract_zc },
+
+ /* The RB field when it needs parentheses. */
+#define PRB (ZC + 1)
+ { 5, 16, 0, SW_OPERAND_IR | SW_OPERAND_PARENS, 0, 0 },
+
+ /* The RB field when it needs parentheses _and_ a preceding comma. */
+#define CPRB (PRB + 1)
+ { 5, 16, 0,
+ SW_OPERAND_IR | SW_OPERAND_PARENS | SW_OPERAND_COMMA, 0, 0 },
+
+ /* The RB field when it must be the same as the RA field. */
+#define RBA (CPRB + 1)
+ { 5, 16, 0, SW_OPERAND_FAKE, insert_rba, extract_rba },
+
+ /* The RC field when it must be the same as the RB field. */
+#define RCA (RBA + 1)
+ { 5, 0, 0, SW_OPERAND_FAKE, insert_rca, extract_rca },
+
+#define RDC (RCA + 1)
+ { 5, 0, 0, SW_OPERAND_FAKE, insert_rdc, extract_rdc },
+
+ /* The RC field when it can *default* to RA. */
+#define DRC1 (RDC + 1)
+ { 5, 0, 0,
+ SW_OPERAND_IR | SW_OPERAND_DEFAULT_FIRST, 0, 0 },
+
+ /* The RC field when it can *default* to RB. */
+#define DRC2 (DRC1 + 1)
+ { 5, 0, 0,
+ SW_OPERAND_IR | SW_OPERAND_DEFAULT_SECOND, 0, 0 },
+
+ /* The FC field when it can *default* to RA. */
+#define DFC1 (DRC2 + 1)
+ { 5, 0, 0,
+ SW_OPERAND_FPR | SW_OPERAND_DEFAULT_FIRST, 0, 0 },
+
+ /* The FC field when it can *default* to RB. */
+#define DFC2 (DFC1 + 1)
+ { 5, 0, 0,
+ SW_OPERAND_FPR | SW_OPERAND_DEFAULT_SECOND, 0, 0 },
+
+ /* The unsigned 8-bit literal of Operate format insns. */
+#define LIT (DFC2 + 1)
+ { 8, 13, -LIT, SW_OPERAND_UNSIGNED, 0, 0 },
+
+ /* The signed 16-bit displacement of Memory format insns. From here
+ we can't tell what relocation should be used, so don't use a default. */
+#define MDISP (LIT + 1)
+ { 16, 0, -MDISP, SW_OPERAND_SIGNED, 0, 0 },
+
+ /* The signed "23-bit" aligned displacement of Branch format insns. */
+#define BDISP (MDISP + 1)
+ { 21, 0, BFD_RELOC_23_PCREL_S2,
+ SW_OPERAND_RELATIVE, insert_bdisp, extract_bdisp },
+
+ /* The 26-bit hmcode function for sys_call and sys_call / b. */
+#define HMFN (BDISP + 1)
+ { 25, 0, -HMFN, SW_OPERAND_UNSIGNED, 0, 0 },
+
+ /* sw jsr/ret instructions have no function bits. */
+ /* The optional signed "16-bit" aligned displacement of the JMP/JSR hint. */
+#define JMPHINT (HMFN + 1)
+ { 16, 0, BFD_RELOC_SW_64_HINT,
+ SW_OPERAND_RELATIVE | SW_OPERAND_DEFAULT_ZERO | SW_OPERAND_NOOVERFLOW,
+ insert_jhint, extract_jhint },
+
+ /* The optional hint to RET/JSR_COROUTINE. */
+#define RETHINT (JMPHINT + 1)
+ { 16, 0, -RETHINT,
+ SW_OPERAND_UNSIGNED | SW_OPERAND_DEFAULT_ZERO, 0, 0 },
+
+ /* The 12-bit displacement for the core3 hw_{ld,st} (pal1b/pal1f) insns. */
+#define HWDISP (RETHINT + 1)
+ { 12, 0, -HWDISP, SW_OPERAND_SIGNED, 0, 0 },
+
+ /* The 16-bit combined index/scoreboard mask for the core3
+ hw_m[ft]pr (pal19/pal1d) insns. */
+#define HWINDEX (HWDISP + 1)
+ { 16, 0, -HWINDEX, SW_OPERAND_UNSIGNED, 0, 0 },
+
+ /* The 13-bit branch hint for the core3 hw_jmp/jsr (pal1e) insn. */
+#define HWJMPHINT (HWINDEX + 1)
+ { 8, 0, -HWJMPHINT,
+ SW_OPERAND_RELATIVE | SW_OPERAND_DEFAULT_ZERO | SW_OPERAND_NOOVERFLOW,
+ insert_sw4hwjhint, extract_sw4hwjhint },
+
+ /* for the third operand of ternary operands integer insn. */
+#define R3 (HWJMPHINT + 1)
+ { 5, 5, 0, SW_OPERAND_IR, 0, 0 },
+ /* The plain fp register fields */
+#define F3 (R3 + 1)
+ { 5, 5, 0, SW_OPERAND_FPR, 0, 0 },
+ /* sw simd settle instruction lit */
+#define FMALIT (F3 + 1)
+ { 5, 5, -FMALIT, SW_OPERAND_UNSIGNED, 0, 0 }, //V1.1
+#define LMDISP (FMALIT + 1)
+ { 15, 0, -LMDISP, SW_OPERAND_UNSIGNED, 0, 0 },
+#define RPIINDEX (LMDISP + 1)
+ { 8, 0, -RPIINDEX, SW_OPERAND_UNSIGNED, 0, 0 },
+#define ATMDISP (RPIINDEX + 1)
+ { 12, 0, -ATMDISP, SW_OPERAND_SIGNED, 0, 0 },
+#define DISP13 (ATMDISP + 1)
+ { 13, 13, -DISP13, SW_OPERAND_SIGNED, 0, 0},
+#define BDISP26 (DISP13 + 1)
+ { 26, 0, 222,
+ SW_OPERAND_RELATIVE, insert_bdisp26, extract_bdisp26 },
+#define DPFTH (BDISP26 + 1)
+ { 5, 21, -DPFTH, SW_OPERAND_UNSIGNED, 0, 0}
+};
+
+const unsigned sw_64_num_operands = sizeof(sw_64_operands) / sizeof(*sw_64_operands);
+
+/* Macros used to form opcodes. */
+
+/* The main opcode. */
+#define OP(x) (((x) & 0x3F) << 26)
+#define OP_MASK 0xFC000000
+
+/* Branch format instructions. */
+#define BRA_(oo) OP(oo)
+#define BRA_MASK OP_MASK
+#define BRA(oo) BRA_(oo), BRA_MASK
+
+#ifdef HUANGLM20171113
+/* Floating point format instructions. */
+#define FP_(oo,fff) (OP(oo) | (((fff) & 0x7FF) << 5))
+#define FP_MASK (OP_MASK | 0xFFE0)
+#define FP(oo,fff) FP_(oo,fff), FP_MASK
+
+#else
+/* Floating point format instructions. */
+#define FP_(oo,fff) (OP(oo) | (((fff) & 0xFF) << 5))
+#define FP_MASK (OP_MASK | 0x1FE0)
+#define FP(oo,fff) FP_(oo,fff), FP_MASK
+
+#define FMA_(oo,fff) (OP(oo) | (((fff) & 0x3F) << 10 ))
+#define FMA_MASK (OP_MASK | 0xFC00)
+#define FMA(oo,fff) FMA_(oo,fff), FMA_MASK
+#endif
+
+/* Memory format instructions. */
+#define MEM_(oo) OP(oo)
+#define MEM_MASK OP_MASK
+#define MEM(oo) MEM_(oo), MEM_MASK
+
+/* Memory/Func Code format instructions. */
+#define MFC_(oo,ffff) (OP(oo) | ((ffff) & 0xFFFF))
+#define MFC_MASK (OP_MASK | 0xFFFF)
+#define MFC(oo,ffff) MFC_(oo,ffff), MFC_MASK
+
+/* Memory/Branch format instructions. */
+#define MBR_(oo,h) (OP(oo) | (((h) & 3) << 14))
+#define MBR_MASK (OP_MASK | 0xC000)
+#define MBR(oo,h) MBR_(oo,h), MBR_MASK
+
+/* Now sw Operate format instructions is different with SW1. */
+#define OPR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5))
+#define OPRL_(oo,ff) (OPR_((oo), (ff)) )
+#define OPR_MASK (OP_MASK | 0x1FE0)
+#define OPR(oo,ff) OPR_(oo,ff), OPR_MASK
+#define OPRL(oo,ff) OPRL_(oo,ff), OPR_MASK
+
+/* sw ternary operands Operate format instructions. */
+#define TOPR_(oo,ff) (OP(oo) | (((ff) & 0x07) << 10))
+#define TOPRL_(oo,ff) (TOPR_((oo), (ff)))
+#define TOPR_MASK (OP_MASK | 0x1C00)
+#define TOPR(oo,ff) TOPR_(oo,ff), TOPR_MASK
+#define TOPRL(oo,ff) TOPRL_(oo,ff), TOPR_MASK
+
+/* sw atom instructions. */
+#define ATMEM_(oo,h) (OP(oo) | (((h) & 0xF) << 12))
+#define ATMEM_MASK (OP_MASK | 0xF000)
+#define ATMEM(oo,h) ATMEM_(oo,h), ATMEM_MASK
+
+/* sw privilege instructions. */
+#define PRIRET_(oo,h) (OP(oo) | (((h) & 0x1) << 20))
+#define PRIRET_MASK (OP_MASK | 0x100000)
+#define PRIRET(oo,h) PRIRET_(oo,h), PRIRET_MASK
+
+/* sw rpi_rcsr,rpi_wcsr. */
+#define CSR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8))
+#define CSR_MASK (OP_MASK | 0xFF00)
+#define CSR(oo,ff) CSR_(oo,ff), CSR_MASK
+
+#define PCD_(oo,ff) (OP(oo) | (ff << 25))
+#define PCD_MASK OP_MASK
+#define PCD(oo,ff) PCD_(oo,ff), PCD_MASK
+
+/* Hardware memory (hw_{ld,st}) instructions. */
+#define HWMEM_(oo,f) (OP(oo) | (((f) & 0xF) << 12))
+#define HWMEM_MASK (OP_MASK | 0xF000)
+#define HWMEM(oo,f) HWMEM_(oo,f), HWMEM_MASK
+
+#define LOGX_(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10))
+#define LOGX_MASK (0xF0000000)
+#define LOGX(oo,ff) LOGX_(oo,ff), LOGX_MASK
+
+/* Abbreviations for instruction subsets. */
+#define BASE SW_OPCODE_BASE
+#define CORE3 SW_OPCODE_CORE3
+
+/* Common combinations of arguments. */
+#define ARG_NONE { 0 }
+#define ARG_BRA { RA, BDISP }
+#define ARG_FBRA { FA, BDISP }
+#define ARG_FP { FA, FB, DFC1 }
+#define ARG_FPZ1 { ZA, FB, DFC1 }
+#define ARG_MEM { RA, MDISP, PRB }
+#define ARG_FMEM { FA, MDISP, PRB }
+#define ARG_OPR { RA, RB, DRC1 }
+
+#define ARG_OPRCAS { RA, RB, RC }
+
+#define ARG_OPRL { RA, LIT, DRC1 }
+#define ARG_OPRZ1 { ZA, RB, DRC1 }
+#define ARG_OPRLZ1 { ZA, LIT, RC }
+#define ARG_PCD { HMFN }
+#define ARG_HWMEM { RA, HWDISP, PRB }
+#define ARG_FPL { FA,LIT, DFC1 }
+#define ARG_FMA { FA,FB,F3, DFC1 }
+#define ARG_PREFETCH { ZA, MDISP, PRB }
+#define ARG_TOPR { RA, RB,R3, DRC1 }
+#define ARG_TOPRL { RA, LIT, R3,DRC1 }
+#define ARG_FMAL { FA,FB,FMALIT, DFC1 }
+#define ARG_ATMEM { RA, ATMDISP, PRB }
+#define ARG_VUAMEM { FA, ATMDISP, PRB }
+#define ARG_OPRLZ3 { RA, LIT, ZC }
+
+#define ARG_DISP13 {DISP13, RC}
+
+/* The opcode table.
+
+ The format of the opcode table is:
+
+ NAME OPCODE MASK { OPERANDS }
+
+ NAME is the name of the instruction.
+
+ OPCODE is the instruction opcode.
+
+ MASK is the opcode mask; this is used to tell the disassembler
+ which bits in the actual opcode must match OPCODE.
+
+ OPERANDS is the list of operands.
+
+ The preceding macros merge the text of the OPCODE and MASK fields.
+
+ The disassembler reads the table in order and prints the first
+ instruction which matches, so this table is sorted to put more
+ specific instructions before more general instructions.
+
+ Otherwise, it is sorted by major opcode and minor function code.
+ */
+
+const struct sw_64_opcode sw_64_opcodes[] = {
+ { "sys_call/b", PCD(0x00,0x00), BASE, ARG_PCD },
+ { "sys_call", PCD(0x00,0x01), BASE, ARG_PCD },
+
+ { "call", MEM(0x01), BASE, { RA, CPRB, JMPHINT } },
+ { "ret", MEM(0x02), BASE, { RA, CPRB, RETHINT } },
+ { "jmp", MEM(0x03), BASE, { RA, CPRB, JMPHINT } },
+ { "br", BRA(0x04), BASE, { ZA, BDISP } },
+ { "br", BRA(0x04), BASE, ARG_BRA },
+ { "bsr", BRA(0x05), BASE, ARG_BRA },
+ { "memb", MFC(0x06,0x0000), BASE, ARG_NONE },
+ { "imemb", MFC(0x06,0x0001), BASE, ARG_NONE },
+ { "rtc", MFC(0x06,0x0020), BASE, { RA, ZB } },
+ { "rtc", MFC(0x06,0x0020), BASE, { RA, RB } },
+ { "rcid", MFC(0x06,0x0040), BASE, { RA , ZB} },
+ { "halt", MFC(0x06,0x0080), BASE, { ZA, ZB } },
+ { "rd_f", MFC(0x06,0x1000), CORE3, { RA, ZB } },
+ { "wr_f", MFC(0x06,0x1020), CORE3, { RA, ZB } },
+ { "rtid", MFC(0x06,0x1040), BASE, { RA } },
+ { "pri_rcsr", CSR(0x06,0xFE), CORE3, { RA, RPIINDEX ,ZB } },
+ { "pri_wcsr", CSR(0x06,0xFF), CORE3, { RA, RPIINDEX ,ZB } },
+ { "pri_ret", PRIRET(0x07,0x0), BASE, { RA } },
+ { "pri_ret/b", PRIRET(0x07,0x1), BASE, { RA } },
+ { "lldw", ATMEM(0x08,0x0), BASE, ARG_ATMEM },
+ { "lldl", ATMEM(0x08,0x1), BASE, ARG_ATMEM },
+ { "ldw_inc", ATMEM(0x08,0x2), CORE3, ARG_ATMEM },
+ { "ldl_inc", ATMEM(0x08,0x3), CORE3, ARG_ATMEM },
+ { "ldw_dec", ATMEM(0x08,0x4), CORE3, ARG_ATMEM },
+ { "ldl_dec", ATMEM(0x08,0x5), CORE3, ARG_ATMEM },
+ { "ldw_set", ATMEM(0x08,0x6), CORE3, ARG_ATMEM },
+ { "ldl_set", ATMEM(0x08,0x7), CORE3, ARG_ATMEM },
+ { "lstw", ATMEM(0x08,0x8), BASE, ARG_ATMEM },
+ { "lstl", ATMEM(0x08,0x9), BASE, ARG_ATMEM },
+ { "ldw_nc", ATMEM(0x08,0xA), BASE, ARG_ATMEM },
+ { "ldl_nc", ATMEM(0x08,0xB), BASE, ARG_ATMEM },
+ { "ldd_nc", ATMEM(0x08,0xC), BASE, ARG_VUAMEM },
+ { "stw_nc", ATMEM(0x08,0xD), BASE, ARG_ATMEM },
+ { "stl_nc", ATMEM(0x08,0xE), BASE, ARG_ATMEM },
+ { "std_nc", ATMEM(0x08,0xF), BASE, ARG_VUAMEM },
+ { "fillcs", MEM(0x09), BASE, ARG_PREFETCH },
+ { "ldwe", MEM(0x09), BASE, ARG_FMEM },
+ { "e_fillcs", MEM(0x0A), BASE, ARG_PREFETCH },
+ { "ldse", MEM(0x0A), BASE, ARG_FMEM },
+ { "fillcs_e", MEM(0x0B), BASE, ARG_PREFETCH },
+ { "ldde", MEM(0x0B), BASE, ARG_FMEM },
+ { "vlds", MEM(0x0C), BASE, ARG_FMEM },
+ { "vldd", MEM(0x0D), BASE, ARG_FMEM },
+ { "vsts", MEM(0x0E), BASE, ARG_FMEM },
+ { "vstd", MEM(0x0F), BASE, ARG_FMEM },
+ { "addw", OPR(0x10,0x00), BASE, ARG_OPR },
+ { "addw", OPRL(0x12,0x00), BASE, ARG_OPRL },
+ { "subw", OPR(0x10,0x01), BASE, ARG_OPR },
+ { "subw", OPRL(0x12,0x01), BASE, ARG_OPRL },
+ { "s4addw", OPR(0x10,0x02), BASE, ARG_OPR },
+ { "s4addw", OPRL(0x12,0x02), BASE, ARG_OPRL },
+ { "s4subw", OPR(0x10,0x03), BASE, ARG_OPR },
+ { "s4subw", OPRL(0x12,0x03), BASE, ARG_OPRL },
+ { "s8addw", OPR(0x10,0x04), BASE, ARG_OPR },
+ { "s8addw", OPRL(0x12,0x04), BASE, ARG_OPRL },
+ { "s8subw", OPR(0x10,0x05), BASE, ARG_OPR },
+ { "s8subw", OPRL(0x12,0x05), BASE, ARG_OPRL },
+ { "addl", OPR(0x10,0x08), BASE, ARG_OPR },
+ { "addl", OPRL(0x12,0x08), BASE, ARG_OPRL },
+ { "subl", OPR(0x10,0x09), BASE, ARG_OPR },
+ { "subl", OPRL(0x12,0x09), BASE, ARG_OPRL },
+ { "s4addl", OPR(0x10,0x0A), BASE, ARG_OPR },
+ { "s4addl", OPRL(0x12,0x0A), BASE, ARG_OPRL },
+ { "s4subl", OPR(0x10,0x0B), BASE, ARG_OPR },
+ { "s4subl", OPRL(0x12,0x0B), BASE, ARG_OPRL },
+ { "s8addl", OPR(0x10,0x0C), BASE, ARG_OPR },
+ { "s8addl", OPRL(0x12,0x0C), BASE, ARG_OPRL },
+ { "s8subl", OPR(0x10,0x0D), BASE, ARG_OPR },
+ { "s8subl", OPRL(0x12,0x0D), BASE, ARG_OPRL },
+ { "mulw", OPR(0x10,0x10), BASE, ARG_OPR },
+ { "mulw", OPRL(0x12,0x10), BASE, ARG_OPRL },
+ { "mull", OPR(0x10,0x18), BASE, ARG_OPR },
+ { "mull", OPRL(0x12,0x18), BASE, ARG_OPRL },
+ { "umulh", OPR(0x10,0x19), BASE, ARG_OPR },
+ { "umulh", OPRL(0x12,0x19), BASE, ARG_OPRL },
+ { "cmpeq", OPR(0x10,0x28), BASE, ARG_OPR },
+ { "cmpeq", OPRL(0x12,0x28), BASE, ARG_OPRL },
+ { "cmplt", OPR(0x10,0x29), BASE, ARG_OPR },
+ { "cmplt", OPRL(0x12,0x29), BASE, ARG_OPRL },
+ { "cmple", OPR(0x10,0x2A), BASE, ARG_OPR },
+ { "cmple", OPRL(0x12,0x2A), BASE, ARG_OPRL },
+ { "cmpult", OPR(0x10,0x2B), BASE, ARG_OPR },
+ { "cmpult", OPRL(0x12,0x2B), BASE, ARG_OPRL },
+ { "cmpule", OPR(0x10,0x2C), BASE, ARG_OPR },
+ { "cmpule", OPRL(0x12,0x2C), BASE, ARG_OPRL },
+
+ { "and", OPR(0x10,0x38), BASE, ARG_OPR },
+ { "and", OPRL(0x12,0x38),BASE, ARG_OPRL },
+ { "bic", OPR(0x10,0x39), BASE, ARG_OPR },
+ { "bic", OPRL(0x12,0x39),BASE, ARG_OPRL },
+ { "bis", OPR(0x10,0x3A), BASE, ARG_OPR },
+ { "bis", OPRL(0x12,0x3A),BASE, ARG_OPRL },
+ { "ornot", OPR(0x10,0x3B), BASE, ARG_OPR },
+ { "ornot", OPRL(0x12,0x3B),BASE, ARG_OPRL },
+ { "xor", OPR(0x10,0x3C), BASE, ARG_OPR },
+ { "xor", OPRL(0x12,0x3C),BASE, ARG_OPRL },
+ { "eqv", OPR(0x10,0x3D), BASE, ARG_OPR },
+ { "eqv", OPRL(0x12,0x3D),BASE, ARG_OPRL },
+ { "inslb", OPR(0x10,0x40), BASE, ARG_OPR },
+ { "inslb", OPRL(0x12,0x40),BASE, ARG_OPRL },
+ { "inslh", OPR(0x10,0x41), BASE, ARG_OPR },
+ { "inslh", OPRL(0x12,0x41),BASE, ARG_OPRL },
+ { "inslw", OPR(0x10,0x42), BASE, ARG_OPR },
+ { "inslw", OPRL(0x12,0x42),BASE, ARG_OPRL },
+ { "insll", OPR(0x10,0x43), BASE, ARG_OPR },
+ { "insll", OPRL(0x12,0x43),BASE, ARG_OPRL },
+ { "inshb", OPR(0x10,0x44), BASE, ARG_OPR },
+ { "inshb", OPRL(0x12,0x44),BASE, ARG_OPRL },
+ { "inshh", OPR(0x10,0x45), BASE, ARG_OPR },
+ { "inshh", OPRL(0x12,0x45),BASE, ARG_OPRL },
+ { "inshw", OPR(0x10,0x46), BASE, ARG_OPR },
+ { "inshw", OPRL(0x12,0x46),BASE, ARG_OPRL },
+ { "inshl", OPR(0x10,0x47), BASE, ARG_OPR },
+ { "inshl", OPRL(0x12,0x47),BASE, ARG_OPRL },
+
+ { "sll", OPR(0x10,0x48), BASE, ARG_OPR },
+ { "sll", OPRL(0x12,0x48),BASE, ARG_OPRL },
+ { "srl", OPR(0x10,0x49), BASE, ARG_OPR },
+ { "srl", OPRL(0x12,0x49),BASE, ARG_OPRL },
+ { "sra", OPR(0x10,0x4A), BASE, ARG_OPR },
+ { "sra", OPRL(0x12,0x4A),BASE, ARG_OPRL },
+ { "extlb", OPR(0x10,0x50), BASE, ARG_OPR },
+ { "extlb", OPRL(0x12,0x50),BASE, ARG_OPRL },
+ { "extlh", OPR(0x10,0x51), BASE, ARG_OPR },
+ { "extlh", OPRL(0x12,0x51),BASE, ARG_OPRL },
+ { "extlw", OPR(0x10,0x52), BASE, ARG_OPR },
+ { "extlw", OPRL(0x12,0x52),BASE, ARG_OPRL },
+ { "extll", OPR(0x10,0x53), BASE, ARG_OPR },
+ { "extll", OPRL(0x12,0x53),BASE, ARG_OPRL },
+ { "exthb", OPR(0x10,0x54), BASE, ARG_OPR },
+ { "exthb", OPRL(0x12,0x54),BASE, ARG_OPRL },
+ { "exthh", OPR(0x10,0x55), BASE, ARG_OPR },
+ { "exthh", OPRL(0x12,0x55),BASE, ARG_OPRL },
+ { "exthw", OPR(0x10,0x56), BASE, ARG_OPR },
+ { "exthw", OPRL(0x12,0x56),BASE, ARG_OPRL },
+ { "exthl", OPR(0x10,0x57), BASE, ARG_OPR },
+ { "exthl", OPRL(0x12,0x57),BASE, ARG_OPRL },
+ { "ctpop", OPR(0x10,0x58), BASE, ARG_OPRZ1 },
+ { "ctlz", OPR(0x10,0x59), BASE, ARG_OPRZ1 },
+ { "cttz", OPR(0x10,0x5A), BASE, ARG_OPRZ1 },
+ { "masklb", OPR(0x10,0x60), BASE, ARG_OPR },
+ { "masklb", OPRL(0x12,0x60),BASE, ARG_OPRL },
+ { "masklh", OPR(0x10,0x61), BASE, ARG_OPR },
+ { "masklh", OPRL(0x12,0x61),BASE, ARG_OPRL },
+ { "masklw", OPR(0x10,0x62), BASE, ARG_OPR },
+ { "masklw", OPRL(0x12,0x62),BASE, ARG_OPRL },
+ { "maskll", OPR(0x10,0x63), BASE, ARG_OPR },
+ { "maskll", OPRL(0x12,0x63),BASE, ARG_OPRL },
+ { "maskhb", OPR(0x10,0x64), BASE, ARG_OPR },
+ { "maskhb", OPRL(0x12,0x64),BASE, ARG_OPRL },
+ { "maskhh", OPR(0x10,0x65), BASE, ARG_OPR },
+ { "maskhh", OPRL(0x12,0x65),BASE, ARG_OPRL },
+ { "maskhw", OPR(0x10,0x66), BASE, ARG_OPR },
+ { "maskhw", OPRL(0x12,0x66),BASE, ARG_OPRL },
+ { "maskhl", OPR(0x10,0x67), BASE, ARG_OPR },
+ { "maskhl", OPRL(0x12,0x67),BASE, ARG_OPRL },
+ { "zap", OPR(0x10,0x68), BASE, ARG_OPR },
+ { "zap", OPRL(0x12,0x68),BASE, ARG_OPRL },
+ { "zapnot", OPR(0x10,0x69), BASE, ARG_OPR },
+ { "zapnot", OPRL(0x12,0x69),BASE, ARG_OPRL },
+ { "sextb", OPR(0x10,0x6A), BASE, ARG_OPRZ1},
+ { "sextb", OPRL(0x12,0x6A),BASE, ARG_OPRLZ1 },
+ { "sexth", OPR(0x10,0x6B), BASE, ARG_OPRZ1 },
+ { "sexth", OPRL(0x12,0x6B),BASE, ARG_OPRLZ1 },
+ { "cmpgeb", OPR(0x10,0x6C), BASE, ARG_OPR },
+ { "cmpgeb", OPRL(0x12,0x6C),BASE, ARG_OPRL },
+ { "fimovs", OPR(0x10,0x70), BASE, { FA, ZB, RC } },
+ { "fimovd", OPR(0x10,0x78), BASE, { FA, ZB, RC } },
+ { "seleq", TOPR(0x11,0x0), BASE, ARG_TOPR },
+ { "seleq", TOPRL(0x13,0x0),BASE, ARG_TOPRL },
+ { "selge", TOPR(0x11,0x1), BASE, ARG_TOPR },
+ { "selge", TOPRL(0x13,0x1),BASE, ARG_TOPRL },
+ { "selgt", TOPR(0x11,0x2), BASE, ARG_TOPR },
+ { "selgt", TOPRL(0x13,0x2),BASE, ARG_TOPRL },
+ { "selle", TOPR(0x11,0x3), BASE, ARG_TOPR },
+ { "selle", TOPRL(0x13,0x3),BASE, ARG_TOPRL },
+ { "sellt", TOPR(0x11,0x4), BASE, ARG_TOPR },
+ { "sellt", TOPRL(0x13,0x4),BASE, ARG_TOPRL },
+ { "selne", TOPR(0x11,0x5), BASE, ARG_TOPR },
+ { "selne", TOPRL(0x13,0x5),BASE, ARG_TOPRL },
+ { "sellbc", TOPR(0x11,0x6), BASE, ARG_TOPR },
+ { "sellbc", TOPRL(0x13,0x6),BASE, ARG_TOPRL },
+ { "sellbs", TOPR(0x11,0x7), BASE, ARG_TOPR },
+ { "sellbs", TOPRL(0x13,0x7),BASE, ARG_TOPRL },
+ { "vlog", LOGX(0x14,0x00), BASE, ARG_FMA },
+
+ { "fadds", FP(0x18,0x00), BASE, ARG_FP },
+ { "faddd", FP(0x18,0x01), BASE, ARG_FP },
+ { "fsubs", FP(0x18,0x02), BASE, ARG_FP },
+ { "fsubd", FP(0x18,0x03), BASE, ARG_FP },
+ { "fmuls", FP(0x18,0x04), BASE, ARG_FP },
+ { "fmuld", FP(0x18,0x05), BASE, ARG_FP },
+ { "fdivs", FP(0x18,0x06), BASE, ARG_FP },
+ { "fdivd", FP(0x18,0x07), BASE, ARG_FP },
+ { "fsqrts", FP(0x18,0x08), BASE, ARG_FPZ1 },
+ { "fsqrtd", FP(0x18,0x09), BASE, ARG_FPZ1 },
+ { "fcmpeq", FP(0x18,0x10), BASE, ARG_FP },
+ { "fcmple", FP(0x18,0x11), BASE, ARG_FP },
+ { "fcmplt", FP(0x18,0x12), BASE, ARG_FP },
+ { "fcmpun", FP(0x18,0x13), BASE, ARG_FP },
+
+ { "fcvtsd", FP(0x18,0x20), BASE, ARG_FPZ1 },
+ { "fcvtds", FP(0x18,0x21), BASE, ARG_FPZ1 },
+ { "fcvtdl_g", FP(0x18,0x22), BASE, ARG_FPZ1 },
+ { "fcvtdl_p", FP(0x18,0x23), BASE, ARG_FPZ1 },
+ { "fcvtdl_z", FP(0x18,0x24), BASE, ARG_FPZ1 },
+ { "fcvtdl_n", FP(0x18,0x25), BASE, ARG_FPZ1 },
+ { "fcvtdl", FP(0x18,0x27), BASE, ARG_FPZ1 },
+ { "fcvtwl", FP(0x18,0x28), BASE, ARG_FPZ1 },
+ { "fcvtlw", FP(0x18,0x29), BASE, ARG_FPZ1 },
+ { "fcvtls", FP(0x18,0x2d), BASE, ARG_FPZ1 },
+ { "fcvtld", FP(0x18,0x2f), BASE, ARG_FPZ1 },
+ { "fcpys", FP(0x18,0x30), BASE, ARG_FP },
+ { "fcpyse", FP(0x18,0x31), BASE, ARG_FP },
+ { "fcpysn", FP(0x18,0x32), BASE, ARG_FP },
+ { "ifmovs", FP(0x18,0x40), BASE, { RA, ZB, FC } },
+ { "ifmovd", FP(0x18,0x41), BASE, { RA, ZB, FC } },
+ { "rfpcr", FP(0x18,0x50), BASE, { FA, RBA, RCA } },
+ { "wfpcr", FP(0x18,0x51), BASE, { FA, RBA, RCA } },
+ { "setfpec0", FP(0x18,0x54), BASE, ARG_NONE },
+ { "setfpec1", FP(0x18,0x55), BASE, ARG_NONE },
+ { "setfpec2", FP(0x18,0x56), BASE, ARG_NONE },
+ { "setfpec3", FP(0x18,0x57), BASE, ARG_NONE },
+ { "fmas", FMA(0x19,0x00), BASE, ARG_FMA },
+ { "fmad", FMA(0x19,0x01), BASE, ARG_FMA },
+ { "fmss", FMA(0x19,0x02), BASE, ARG_FMA },
+ { "fmsd", FMA(0x19,0x03), BASE, ARG_FMA },
+ { "fnmas", FMA(0x19,0x04), BASE, ARG_FMA },
+ { "fnmad", FMA(0x19,0x05), BASE, ARG_FMA },
+ { "fnmss", FMA(0x19,0x06), BASE, ARG_FMA },
+ { "fnmsd", FMA(0x19,0x07), BASE, ARG_FMA },
+ { "fseleq", FMA(0x19,0x10), BASE, ARG_FMA },
+ { "fselne", FMA(0x19,0x11), BASE, ARG_FMA },
+ { "fsellt", FMA(0x19,0x12), BASE, ARG_FMA },
+ { "fselle", FMA(0x19,0x13), BASE, ARG_FMA },
+ { "fselgt", FMA(0x19,0x14), BASE, ARG_FMA },
+ { "fselge", FMA(0x19,0x15), BASE, ARG_FMA },
+ { "vaddw", FP(0x1A,0x00), BASE, ARG_FP },
+ { "vaddw", FP(0x1A,0x20), BASE, ARG_FPL },
+ { "vsubw", FP(0x1A,0x01), BASE, ARG_FP },
+ { "vsubw", FP(0x1A,0x21), BASE, ARG_FPL },
+ { "vcmpgew", FP(0x1A,0x02), BASE, ARG_FP },
+ { "vcmpgew", FP(0x1A,0x22), BASE, ARG_FPL },
+ { "vcmpeqw", FP(0x1A,0x03), BASE, ARG_FP },
+ { "vcmpeqw", FP(0x1A,0x23), BASE, ARG_FPL },
+ { "vcmplew", FP(0x1A,0x04), BASE, ARG_FP },
+ { "vcmplew", FP(0x1A,0x24), BASE, ARG_FPL },
+ { "vcmpltw", FP(0x1A,0x05), BASE, ARG_FP },
+ { "vcmpltw", FP(0x1A,0x25), BASE, ARG_FPL },
+ { "vcmpulew", FP(0x1A,0x06), BASE, ARG_FP },
+ { "vcmpulew", FP(0x1A,0x26), BASE, ARG_FPL },
+ { "vcmpultw", FP(0x1A,0x07), BASE, ARG_FP },
+ { "vcmpultw", FP(0x1A,0x27), BASE, ARG_FPL },
+
+ { "vsllw", FP(0x1A,0x08), BASE, ARG_FP },
+ { "vsllw", FP(0x1A,0x28), BASE, ARG_FPL },
+ { "vsrlw", FP(0x1A,0x09), BASE, ARG_FP },
+ { "vsrlw", FP(0x1A,0x29), BASE, ARG_FPL },
+ { "vsraw", FP(0x1A,0x0A), BASE, ARG_FP },
+ { "vsraw", FP(0x1A,0x2A), BASE, ARG_FPL },
+ { "vrolw", FP(0x1A,0x0B), BASE, ARG_FP },
+ { "vrolw", FP(0x1A,0x2B), BASE, ARG_FPL },
+ { "sllow", FP(0x1A,0x0C), BASE, ARG_FP },
+ { "sllow", FP(0x1A,0x2C), BASE, ARG_FPL },
+ { "srlow", FP(0x1A,0x0D), BASE, ARG_FP },
+ { "srlow", FP(0x1A,0x2D), BASE, ARG_FPL },
+ { "vaddl", FP(0x1A,0x0E), BASE, ARG_FP },
+ { "vaddl", FP(0x1A,0x2E), BASE, ARG_FPL },
+ { "vsubl", FP(0x1A,0x0F), BASE, ARG_FP },
+ { "vsubl", FP(0x1A,0x2F), BASE, ARG_FPL },
+ { "ctpopow", FP(0x1A,0x18), BASE, { FA, ZB, DFC1 } },
+ { "ctlzow", FP(0x1A,0x19), BASE, { FA, ZB, DFC1 } },
+ { "vucaddw", FP(0x1A,0x40), BASE, ARG_FP },
+ { "vucaddw", FP(0x1A,0x60), BASE, ARG_FPL },
+ { "vucsubw", FP(0x1A,0x41), BASE, ARG_FP },
+ { "vucsubw", FP(0x1A,0x61), BASE, ARG_FPL },
+ { "vucaddh", FP(0x1A,0x42), BASE, ARG_FP },
+ { "vucaddh", FP(0x1A,0x62), BASE, ARG_FPL },
+ { "vucsubh", FP(0x1A,0x43), BASE, ARG_FP },
+ { "vucsubh", FP(0x1A,0x63), BASE, ARG_FPL },
+ { "vucaddb", FP(0x1A,0x44), BASE, ARG_FP },
+ { "vucaddb", FP(0x1A,0x64), BASE, ARG_FPL },
+ { "vucsubb", FP(0x1A,0x45), BASE, ARG_FP },
+ { "vucsubb", FP(0x1A,0x65), BASE, ARG_FPL },
+ { "vadds", FP(0x1A,0x80), BASE, ARG_FP },
+ { "vaddd", FP(0x1A,0x81), BASE, ARG_FP },
+ { "vsubs", FP(0x1A,0x82), BASE, ARG_FP },
+ { "vsubd", FP(0x1A,0x83), BASE, ARG_FP },
+ { "vmuls", FP(0x1A,0x84), BASE, ARG_FP },
+ { "vmuld", FP(0x1A,0x85), BASE, ARG_FP },
+ { "vdivs", FP(0x1A,0x86), BASE, ARG_FP },
+ { "vdivd", FP(0x1A,0x87), BASE, ARG_FP },
+ { "vsqrts", FP(0x1A,0x88), BASE, ARG_FPZ1 },
+ { "vsqrtd", FP(0x1A,0x89), BASE, ARG_FPZ1 },
+ { "vfcmpeq", FP(0x1A,0x8C), BASE, ARG_FP },
+ { "vfcmple", FP(0x1A,0x8D), BASE, ARG_FP },
+ { "vfcmplt", FP(0x1A,0x8E), BASE, ARG_FP },
+ { "vfcmpun", FP(0x1A,0x8F), BASE, ARG_FP },
+ { "vcpys", FP(0x1A,0x90), BASE, ARG_FP },
+ { "vcpyse", FP(0x1A,0x91), BASE, ARG_FP },
+ { "vcpysn", FP(0x1A,0x92), BASE, ARG_FP },
+ { "vmas", FMA(0x1B,0x00), BASE, ARG_FMA },
+ { "vmad", FMA(0x1B,0x01), BASE, ARG_FMA },
+ { "vmss", FMA(0x1B,0x02), BASE, ARG_FMA },
+ { "vmsd", FMA(0x1B,0x03), BASE, ARG_FMA },
+ { "vnmas", FMA(0x1B,0x04), BASE, ARG_FMA },
+ { "vnmad", FMA(0x1B,0x05), BASE, ARG_FMA },
+ { "vnmss", FMA(0x1B,0x06), BASE, ARG_FMA },
+ { "vnmsd", FMA(0x1B,0x07), BASE, ARG_FMA },
+ { "vfseleq", FMA(0x1B,0x10), BASE, ARG_FMA },
+ { "vfsellt", FMA(0x1B,0x12), BASE, ARG_FMA },
+ { "vfselle", FMA(0x1B,0x13), BASE, ARG_FMA },
+ { "vseleqw", FMA(0x1B,0x18), BASE, ARG_FMA },
+ { "vseleqw", FMA(0x1B,0x38), BASE, ARG_FMAL },
+ { "vsellbcw", FMA(0x1B,0x19), BASE, ARG_FMA },
+ { "vsellbcw", FMA(0x1B,0x39), BASE, ARG_FMAL },
+ { "vselltw", FMA(0x1B,0x1A), BASE, ARG_FMA },
+ { "vselltw", FMA(0x1B,0x3A), BASE, ARG_FMAL },
+ { "vsellew", FMA(0x1B,0x1B), BASE, ARG_FMA },
+ { "vsellew", FMA(0x1B,0x3B), BASE, ARG_FMAL },
+ { "vinsw", FMA(0x1B,0x20), BASE, ARG_FMAL },
+ { "vinsf", FMA(0x1B,0x21), BASE, ARG_FMAL },
+ { "vextw", FMA(0x1B,0x22), BASE, { FA, FMALIT, DFC1 }},
+ { "vextf", FMA(0x1B,0x23), BASE, { FA, FMALIT, DFC1 }},
+ { "vcpyw", FMA(0x1B,0x24), BASE, { FA, DFC1 }},
+ { "vcpyf", FMA(0x1B,0x25), BASE, { FA, DFC1 }},
+ { "vconw", FMA(0x1B,0x26), BASE, ARG_FMA },
+ { "vshfw", FMA(0x1B,0x27), BASE, ARG_FMA },
+ { "vcons", FMA(0x1B,0x28), BASE, ARG_FMA },
+ { "vcond", FMA(0x1B,0x29), BASE, ARG_FMA },
+ { "vldw_u", ATMEM(0x1C,0x0), BASE, ARG_VUAMEM },
+ { "vstw_u", ATMEM(0x1C,0x1), BASE, ARG_VUAMEM },
+ { "vlds_u", ATMEM(0x1C,0x2), BASE, ARG_VUAMEM },
+ { "vsts_u", ATMEM(0x1C,0x3), BASE, ARG_VUAMEM },
+ { "vldd_u", ATMEM(0x1C,0x4), BASE, ARG_VUAMEM },
+ { "vstd_u", ATMEM(0x1C,0x5), BASE, ARG_VUAMEM },
+ { "vstw_ul", ATMEM(0x1C,0x8), BASE, ARG_VUAMEM },
+ { "vstw_uh", ATMEM(0x1C,0x9), BASE, ARG_VUAMEM },
+ { "vsts_ul", ATMEM(0x1C,0xA), BASE, ARG_VUAMEM },
+ { "vsts_uh", ATMEM(0x1C,0xB), BASE, ARG_VUAMEM },
+ { "vstd_ul", ATMEM(0x1C,0xC), BASE, ARG_VUAMEM },
+ { "vstd_uh", ATMEM(0x1C,0xD), BASE, ARG_VUAMEM },
+ { "vldd_nc", ATMEM(0x1C,0xE), BASE, ARG_VUAMEM },
+ { "vstd_nc", ATMEM(0x1C,0xF), BASE, ARG_VUAMEM },
+
+ { "flushd", MEM(0x20), BASE, ARG_PREFETCH },
+ { "ldbu", MEM(0x20), BASE, ARG_MEM },
+ { "evictdg", MEM(0x21), BASE, ARG_PREFETCH },
+ { "ldhu", MEM(0x21), BASE, ARG_MEM },
+ { "s_fillcs", MEM(0x22), BASE, ARG_PREFETCH },
+ { "ldw", MEM(0x22), BASE, ARG_MEM },
+ { "s_fillde", MEM(0x23), BASE, ARG_PREFETCH },
+ { "ldl", MEM(0x23), BASE, ARG_MEM },
+ { "evictdl", MEM(0x24), BASE, ARG_PREFETCH },
+ { "ldl_u", MEM(0x24), BASE, ARG_MEM },
+ { "pri_ldw/p", HWMEM(0x25,0x0), BASE, ARG_HWMEM },
+ { "pri_ldw/v", HWMEM(0x25,0x8), BASE, ARG_HWMEM },
+ { "pri_ldl/p", HWMEM(0x25,0x1), BASE, ARG_HWMEM },
+ { "pri_ldl/v", HWMEM(0x25,0x9), BASE, ARG_HWMEM },
+ { "fillde", MEM(0x26), BASE, ARG_PREFETCH },
+ { "flds", MEM(0x26), BASE, ARG_FMEM },
+ { "fillde_e", MEM(0x27), BASE, ARG_PREFETCH },
+ { "fldd", MEM(0x27), BASE, ARG_FMEM },
+
+ { "stb", MEM(0x28), BASE, ARG_MEM },
+ { "sth", MEM(0x29), BASE, ARG_MEM },
+ { "stw", MEM(0x2A), BASE, ARG_MEM },
+ { "stl", MEM(0x2B), BASE, ARG_MEM },
+ { "stl_u", MEM(0x2C), BASE, ARG_MEM },
+ { "pri_stw/p", HWMEM(0x2D,0x0), BASE, ARG_HWMEM },
+ { "pri_stw/v", HWMEM(0x2D,0x8), BASE, ARG_HWMEM },
+ { "pri_stl/p", HWMEM(0x2D,0x1), BASE, ARG_HWMEM },
+ { "pri_stl/v", HWMEM(0x2D,0x9), BASE, ARG_HWMEM },
+ { "fsts", MEM(0x2E), BASE, ARG_FMEM },
+ { "fstd", MEM(0x2F), BASE, ARG_FMEM },
+ { "beq", BRA(0x30), BASE, ARG_BRA },
+ { "bne", BRA(0x31), BASE, ARG_BRA },
+ { "blt", BRA(0x32), BASE, ARG_BRA },
+ { "ble", BRA(0x33), BASE, ARG_BRA },
+ { "bgt", BRA(0x34), BASE, ARG_BRA },
+ { "bge", BRA(0x35), BASE, ARG_BRA },
+ { "blbc", BRA(0x36), BASE, ARG_BRA },
+ { "blbs", BRA(0x37), BASE, ARG_BRA },
+
+ { "fbeq", BRA(0x38), BASE, ARG_FBRA },
+ { "fbne", BRA(0x39), BASE, ARG_FBRA },
+ { "fblt", BRA(0x3A), BASE, ARG_FBRA },
+ { "fble", BRA(0x3B), BASE, ARG_FBRA },
+ { "fbgt", BRA(0x3C), BASE, ARG_FBRA },
+ { "fbge", BRA(0x3D), BASE, ARG_FBRA },
+ { "ldi", MEM(0x3E), BASE, ARG_MEM },
+ { "ldih", MEM(0x3F), BASE, ARG_MEM },
+};
+
+const unsigned sw_64_num_opcodes = sizeof(sw_64_opcodes) / sizeof(*sw_64_opcodes);
+
+/* OSF register names. */
+
+static const char * const osf_regnames[64] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "t12", "at", "gp", "sp", "zero",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"
+};
+
+/* VMS register names. */
+
+static const char * const vms_regnames[64] = {
+ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
+ "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
+ "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
+ "R24", "AI", "RA", "PV", "AT", "FP", "SP", "RZ",
+ "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
+ "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
+ "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23",
+ "F24", "F25", "F26", "F27", "F28", "F29", "F30", "FZ"
+};
+
+int print_insn_sw_64(bfd_vma memaddr, struct disassemble_info *info)
+{
+ static const struct sw_64_opcode *opcode_index[SW_NOPS + 1];
+ const char * const * regnames;
+ const struct sw_64_opcode *opcode, *opcode_end;
+ const unsigned char *opindex;
+ unsigned insn, op, isa_mask;
+ int need_comma;
+
+ /* Initialize the majorop table the first time through */
+ if (!opcode_index[0]) {
+ opcode = sw_64_opcodes;
+ opcode_end = opcode + sw_64_num_opcodes;
+
+ for (op = 0; op < SW_NOPS; ++op) {
+ opcode_index[op] = opcode;
+ if ((SW_LITOP (opcode->opcode) != 0x10) && (SW_LITOP (opcode->opcode) != 0x11)) {
+ while (opcode < opcode_end && op == SW_OP (opcode->opcode))
+ ++opcode;
+ } else {
+ while (opcode < opcode_end && op == SW_LITOP (opcode->opcode))
+ ++opcode;
+ }
+ }
+ opcode_index[op] = opcode;
+ }
+
+ if (info->flavour == bfd_target_evax_flavour)
+ regnames = vms_regnames;
+ else
+ regnames = osf_regnames;
+ isa_mask = SW_OPCODE_NOHM;
+ switch (info->mach) {
+ case bfd_mach_sw_64_core3:
+ isa_mask |= SW_OPCODE_BASE | SW_OPCODE_CORE3;
+ break;
+ }
+
+ /* Read the insn into a host word */
+ {
+ bfd_byte buffer[4];
+ int status = (*info->read_memory_func) (memaddr, buffer, 4, info);
+ if (status != 0) {
+ (*info->memory_error_func) (status, memaddr, info);
+ return -1;
+ }
+ insn = bfd_getl32 (buffer);
+ }
+
+ /* Get the major opcode of the instruction. */
+ if ((SW_LITOP (insn) == 0x10) || (SW_LITOP (insn) == 0x11))
+ op = SW_LITOP (insn);
+ else if ((SW_OP(insn) & 0x3C) == 0x14 )
+ op = 0x14;
+ else
+ op = SW_OP (insn);
+
+ /* Find the first match in the opcode table. */
+ opcode_end = opcode_index[op + 1];
+ for (opcode = opcode_index[op]; opcode < opcode_end; ++opcode) {
+ if ((insn ^ opcode->opcode) & opcode->mask)
+ continue;
+
+ if (!(opcode->flags & isa_mask))
+ continue;
+
+ /* Make two passes over the operands. First see if any of them
+ have extraction functions, and, if they do, make sure the
+ instruction is valid. */
+ {
+ int invalid = 0;
+ for (opindex = opcode->operands; *opindex != 0; opindex++) {
+ const struct sw_64_operand *operand = sw_64_operands + *opindex;
+ if (operand->extract)
+ (*operand->extract) (insn, &invalid);
+ }
+ if (invalid)
+ continue;
+ }
+
+ /* The instruction is valid. */
+ goto found;
+ }
+
+ /* No instruction found */
+ (*info->fprintf_func) (info->stream, ".long %#08x", insn);
+
+ return 4;
+
+found:
+ if (!strncmp("sys_call",opcode->name,8)) {
+ if (insn & (0x1 << 25))
+ (*info->fprintf_func) (info->stream, "%s", "sys_call");
+ else
+ (*info->fprintf_func) (info->stream, "%s", "sys_call/b");
+ } else
+ (*info->fprintf_func) (info->stream, "%s", opcode->name);
+
+ /* get zz[7:6] and zz[5:0] to form truth for vlog */
+ if (!strcmp(opcode->name, "vlog"))
+ {
+ unsigned int truth;
+ char tr[4];
+ truth=(SW_OP(insn) & 3) << 6;
+ truth = truth | ((insn & 0xFC00) >> 10);
+ sprintf(tr,"%x",truth);
+ (*info->fprintf_func) (info->stream, "%s", tr);
+ }
+ if (opcode->operands[0] != 0)
+ (*info->fprintf_func) (info->stream, "\t");
+
+ /* Now extract and print the operands. */
+ need_comma = 0;
+ for (opindex = opcode->operands; *opindex != 0; opindex++) {
+ const struct sw_64_operand *operand = sw_64_operands + *opindex;
+ int value;
+
+ /* Operands that are marked FAKE are simply ignored. We
+ already made sure that the extract function considered
+ the instruction to be valid. */
+ if ((operand->flags & SW_OPERAND_FAKE) != 0)
+ continue;
+
+ /* Extract the value from the instruction. */
+ if (operand->extract)
+ value = (*operand->extract) (insn, (int *) NULL);
+ else {
+ value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
+ if (operand->flags & SW_OPERAND_SIGNED) {
+ int signbit = 1 << (operand->bits - 1);
+ value = (value ^ signbit) - signbit;
+ }
+ }
+
+ if (need_comma &&
+ ((operand->flags & (SW_OPERAND_PARENS | SW_OPERAND_COMMA))
+ != SW_OPERAND_PARENS)) {
+ (*info->fprintf_func) (info->stream, ",");
+ }
+ if (operand->flags & SW_OPERAND_PARENS)
+ (*info->fprintf_func) (info->stream, "(");
+
+ /* Print the operand as directed by the flags. */
+ if (operand->flags & SW_OPERAND_IR)
+ (*info->fprintf_func) (info->stream, "%s", regnames[value]);
+ else if (operand->flags & SW_OPERAND_FPR)
+ (*info->fprintf_func) (info->stream, "%s", regnames[value + 32]);
+ else if (operand->flags & SW_OPERAND_RELATIVE)
+ (*info->print_address_func) (memaddr + 4 + value, info);
+ else if (operand->flags & SW_OPERAND_SIGNED)
+ (*info->fprintf_func) (info->stream, "%d", value);
+ else
+ (*info->fprintf_func) (info->stream, "%#x", value);
+
+ if (operand->flags & SW_OPERAND_PARENS)
+ (*info->fprintf_func) (info->stream, ")");
+ need_comma = 1;
+ }
+
+ return 4;
+}
diff --git a/hw/Kconfig b/hw/Kconfig
index ad20cce0a9..5f3957be0f 100644
--- a/hw/Kconfig
+++ b/hw/Kconfig
@@ -63,6 +63,7 @@ source sparc/Kconfig
source sparc64/Kconfig
source tricore/Kconfig
source xtensa/Kconfig
+source sw64/Kconfig
# Symbols used by multiple targets
config TEST_DEVICES
diff --git a/hw/meson.build b/hw/meson.build
index b3366c888e..f39c1f7e70 100644
--- a/hw/meson.build
+++ b/hw/meson.build
@@ -62,5 +62,6 @@ subdir('s390x')
subdir('sh4')
subdir('sparc')
subdir('sparc64')
+subdir('sw64')
subdir('tricore')
subdir('xtensa')
diff --git a/hw/rtc/sun4v-rtc.c b/hw/rtc/sun4v-rtc.c
index e037acd1b5..58a0cff483 100644
--- a/hw/rtc/sun4v-rtc.c
+++ b/hw/rtc/sun4v-rtc.c
@@ -32,10 +32,17 @@ static uint64_t sun4v_rtc_read(void *opaque, hwaddr addr,
unsigned size)
{
uint64_t val = get_clock_realtime() / NANOSECONDS_PER_SECOND;
+#if defined(__sw_64__)
+ if (addr & 4ULL) {
+ /* accessing the high 32 bits */
+ val >>= 32;
+ }
+#else
if (!(addr & 4ULL)) {
/* accessing the high 32 bits */
val >>= 32;
}
+#endif
trace_sun4v_rtc_read(addr, val);
return val;
}
@@ -49,7 +56,11 @@ static void sun4v_rtc_write(void *opaque, hwaddr addr,
static const MemoryRegionOps sun4v_rtc_ops = {
.read = sun4v_rtc_read,
.write = sun4v_rtc_write,
+#if defined(__sw_64__)
+ .endianness = DEVICE_LITTLE_ENDIAN,
+#else
.endianness = DEVICE_NATIVE_ENDIAN,
+#endif
};
void sun4v_rtc_init(hwaddr addr)
diff --git a/hw/sw64/Kconfig b/hw/sw64/Kconfig
new file mode 100644
index 0000000000..2bf19e8234
--- /dev/null
+++ b/hw/sw64/Kconfig
@@ -0,0 +1,11 @@
+config CORE3
+ bool
+ imply PCI_DEVICES
+ imply TEST_DEVICES
+ imply E1000_PCI
+ select PCI_EXPRESS
+ select SUN4V_RTC
+ select VIRTIO_MMIO
+ select SERIAL
+ select IDE_CMD646
+ select VIRTIO_VGA
diff --git a/hw/sw64/Makefile.objs b/hw/sw64/Makefile.objs
new file mode 100644
index 0000000000..73add9a91d
--- /dev/null
+++ b/hw/sw64/Makefile.objs
@@ -0,0 +1 @@
+obj-y += core3.o core3_board.o
diff --git a/hw/sw64/core.h b/hw/sw64/core.h
new file mode 100644
index 0000000000..4923382229
--- /dev/null
+++ b/hw/sw64/core.h
@@ -0,0 +1,25 @@
+#ifndef HW_SW64_SYS_H
+#define HW_SW64_SYS_H
+
+typedef struct boot_params {
+ unsigned long initrd_size; /* size of initrd */
+ unsigned long initrd_start; /* logical address of initrd */
+ unsigned long dtb_start; /* logical address of dtb */
+ unsigned long efi_systab; /* logical address of EFI system table */
+ unsigned long efi_memmap; /* logical address of EFI memory map */
+ unsigned long efi_memmap_size; /* size of EFI memory map */
+ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */
+ unsigned long efi_memdesc_version; /* memory descriptor version */
+ unsigned long cmdline; /* logical address of cmdline */
+} BOOT_PARAMS;
+
+void core3_board_init(SW64CPU *cpus[4], MemoryRegion *ram);
+#endif
+
+#define MAX_CPUS 64
+
+#ifdef CONFIG_KVM
+#define MAX_CPUS_CORE3 64
+#else
+#define MAX_CPUS_CORE3 32
+#endif
diff --git a/hw/sw64/core3.c b/hw/sw64/core3.c
new file mode 100644
index 0000000000..dbe4ed6fa1
--- /dev/null
+++ b/hw/sw64/core3.c
@@ -0,0 +1,182 @@
+/*
+ * QEMU CORE3 hardware system emulator.
+ *
+ * Copyright (c) 2021 Lu Feifei
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/datadir.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "elf.h"
+#include "hw/loader.h"
+#include "hw/boards.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+#include "hw/ide.h"
+#include "hw/char/serial.h"
+#include "qemu/cutils.h"
+#include "ui/console.h"
+#include "core.h"
+#include "hw/boards.h"
+#include "sysemu/numa.h"
+
+static uint64_t cpu_sw64_virt_to_phys(void *opaque, uint64_t addr)
+{
+ return addr &= ~0xffffffff80000000 ;
+}
+
+static CpuInstanceProperties
+sw64_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+
+ assert(cpu_index < possible_cpus->len);
+ return possible_cpus->cpus[cpu_index].props;
+}
+
+static int64_t sw64_get_default_cpu_node_id(const MachineState *ms, int idx)
+{
+ int nb_numa_nodes = ms->numa_state->num_nodes;
+ return idx % nb_numa_nodes;
+}
+
+static const CPUArchIdList *sw64_possible_cpu_arch_ids(MachineState *ms)
+{
+ int i;
+ unsigned int max_cpus = ms->smp.max_cpus;
+
+ if (ms->possible_cpus) {
+ /*
+ * make sure that max_cpus hasn't changed since the first use, i.e.
+ * -smp hasn't been parsed after it
+ */
+ assert(ms->possible_cpus->len == max_cpus);
+ return ms->possible_cpus;
+ }
+
+ ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ ms->possible_cpus->len = max_cpus;
+ for (i = 0; i < ms->possible_cpus->len; i++) {
+ ms->possible_cpus->cpus[i].type = ms->cpu_type;
+ ms->possible_cpus->cpus[i].vcpus_count = 1;
+ ms->possible_cpus->cpus[i].arch_id = i;
+ ms->possible_cpus->cpus[i].props.has_thread_id = true;
+ ms->possible_cpus->cpus[i].props.core_id = i;
+ }
+
+ return ms->possible_cpus;
+}
+
+static void core3_cpu_reset(void *opaque)
+{
+ SW64CPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+}
+
+static void core3_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ ram_addr_t buf;
+ SW64CPU *cpus[machine->smp.max_cpus];
+ long i, size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ char *hmcode_filename;
+ char *uefi_filename;
+ uint64_t hmcode_entry, hmcode_low, hmcode_high;
+ uint64_t kernel_entry, kernel_low, kernel_high;
+ BOOT_PARAMS *core3_boot_params = g_new0(BOOT_PARAMS, 1);
+ uint64_t param_offset;
+
+ memset(cpus, 0, sizeof(cpus));
+
+ for (i = 0; i < machine->smp.cpus; ++i) {
+ cpus[i] = SW64_CPU(cpu_create(machine->cpu_type));
+ cpus[i]->env.csr[CID] = i;
+ qemu_register_reset(core3_cpu_reset, cpus[i]);
+ }
+ core3_board_init(cpus, machine->ram);
+ if (kvm_enabled())
+ buf = ram_size;
+ else
+ buf = ram_size | (1UL << 63);
+
+ rom_add_blob_fixed("ram_size", (char *)&buf, 0x8, 0x2040);
+
+ param_offset = 0x90B000UL;
+ core3_boot_params->cmdline = param_offset | 0xfff0000000000000UL;
+ rom_add_blob_fixed("core3_boot_params", (core3_boot_params), 0x48, 0x90A100);
+
+ hmcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, kvm_enabled() ? "core3-reset":"core3-hmcode");
+ if (hmcode_filename == NULL) {
+ if (kvm_enabled())
+ error_report("no core3-reset provided");
+ else
+ error_report("no core3-hmcode provided");
+ exit(1);
+ }
+ size = load_elf(hmcode_filename, NULL, cpu_sw64_virt_to_phys, NULL,
+ &hmcode_entry, &hmcode_low, &hmcode_high, NULL, 0, EM_SW64, 0, 0);
+ if (size < 0) {
+ if (kvm_enabled())
+ error_report("could not load core3-reset: '%s'", hmcode_filename);
+ else
+ error_report("could not load core3-hmcode: '%s'", hmcode_filename);
+ exit(1);
+ }
+ g_free(hmcode_filename);
+
+ /* Start all cpus at the hmcode RESET entry point. */
+ for (i = 0; i < machine->smp.cpus; ++i) {
+ cpus[i]->env.pc = hmcode_entry;
+ cpus[i]->env.hm_entry = hmcode_entry;
+ }
+
+ if (!kernel_filename) {
+ uefi_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "uefi-bios-sw");
+ load_image_targphys(uefi_filename, 0x2f00000UL, -1);
+ g_free(uefi_filename);
+ } else {
+ /* Load a kernel. */
+ size = load_elf(kernel_filename, NULL, cpu_sw64_virt_to_phys, NULL,
+ &kernel_entry, &kernel_low, &kernel_high, NULL, 0, EM_SW64, 0, 0);
+ if (size < 0) {
+ error_report("could not load kernel '%s'", kernel_filename);
+ exit(1);
+ }
+ cpus[0]->env.trap_arg1 = kernel_entry;
+ if (kernel_cmdline)
+ pstrcpy_targphys("cmdline", param_offset, 0x400, kernel_cmdline);
+ }
+}
+
+static void board_reset(MachineState *state)
+{
+ qemu_devices_reset();
+}
+
+static void core3_machine_init(MachineClass *mc)
+{
+ mc->desc = "core3 BOARD";
+ mc->init = core3_init;
+ mc->block_default_type = IF_IDE;
+ mc->max_cpus = MAX_CPUS_CORE3;
+ mc->is_default = 0;
+ mc->reset = board_reset;
+ mc->possible_cpu_arch_ids = sw64_possible_cpu_arch_ids;
+ mc->cpu_index_to_instance_props = sw64_cpu_index_to_props;
+ mc->default_cpu_type = SW64_CPU_TYPE_NAME("core3");
+ mc->default_ram_id = "ram";
+ mc->get_default_cpu_node_id = sw64_get_default_cpu_node_id;
+}
+
+DEFINE_MACHINE("core3", core3_machine_init)
diff --git a/hw/sw64/core3_board.c b/hw/sw64/core3_board.c
new file mode 100644
index 0000000000..7853e01edb
--- /dev/null
+++ b/hw/sw64/core3_board.c
@@ -0,0 +1,493 @@
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "core.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "hw/pci/pci_host.h"
+#include "hw/pci/pci.h"
+#include "hw/char/serial.h"
+#include "hw/irq.h"
+#include "net/net.h"
+#include "hw/usb.h"
+#include "hw/ide/pci.h"
+#include "hw/ide/ahci.h"
+#include "sysemu/numa.h"
+#include "sysemu/kvm.h"
+#include "hw/rtc/sun4v-rtc.h"
+#include "hw/pci/msi.h"
+#include "hw/sw64/sw64_iommu.h"
+
+#define TYPE_SWBOARD_PCI_HOST_BRIDGE "core_board-pcihost"
+#define SWBOARD_PCI_HOST_BRIDGE(obj) \
+ OBJECT_CHECK(BoardState, (obj), TYPE_SWBOARD_PCI_HOST_BRIDGE)
+
+#define MAX_IDE_BUS 2
+#define SW_PIN_TO_IRQ 16
+
+typedef struct SWBoard {
+ SW64CPU *cpu[MAX_CPUS_CORE3];
+} SWBoard;
+
+typedef struct BoardState {
+ PCIHostState parent_obj;
+
+ SWBoard sboard;
+ uint64_t expire_time;
+} BoardState;
+
+typedef struct TimerState {
+ void *opaque;
+ int order;
+} TimerState;
+
+#ifndef CONFIG_KVM
+static void swboard_alarm_timer(void *opaque)
+{
+ TimerState *ts = (TimerState *)((uintptr_t)opaque);
+ BoardState *bs = (BoardState *)((uintptr_t)ts->opaque);
+
+ int cpu = ts->order;
+ cpu_interrupt(CPU(bs->sboard.cpu[cpu]), CPU_INTERRUPT_TIMER);
+}
+#endif
+
+static PCIINTxRoute sw_route_intx_pin_to_irq(void *opaque, int pin)
+{
+    /* Every INTx pin on this board is hard-wired to one interrupt line. */
+    PCIINTxRoute route = {
+        .mode = PCI_INTX_ENABLED,
+        .irq = SW_PIN_TO_IRQ,
+    };
+    return route;
+}
+
+static uint64_t convert_bit(int n)
+{
+    /* Mask with the low n bits set (e.g. n==3 -> 0x7).  Test n first:
+     * the old code evaluated 1UL << 64 before the check, which is UB. */
+    if (n >= 64)
+        return 0xffffffffffffffffUL;
+    return (1UL << n) - 1;
+}
+
+static uint64_t mcu_read(void *opaque, hwaddr addr, unsigned size)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int smp_cpus = ms->smp.cpus;
+ uint64_t ret = 0;
+ switch (addr) {
+ case 0x0000:
+ /* CG_ONLINE */
+ {
+ int i;
+ for (i = 0; i < smp_cpus; i = i + 4)
+ ret |= (1UL << i);
+ }
+ break;
+ /*IO_START*/
+ case 0x1300:
+ ret = 0x1;
+ break;
+ case 0x3780:
+ /* MC_ONLINE */
+ ret = convert_bit(smp_cpus);
+ break;
+ case 0x0900:
+ /* CPUID */
+ ret = 0;
+ break;
+ case 0x1180:
+ /* LONGTIME */
+ ret = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 80;
+ break;
+ case 0x4900:
+ /* MC_CONFIG */
+ break;
+ case 0x0780:
+ /* CORE_ONLINE */
+ ret = convert_bit(smp_cpus);
+ break;
+ case 0x0680:
+ /* INIT_CTL */
+ ret = 0x000003AE00000D28;
+ break;
+ default:
+ fprintf(stderr, "Unsupported MCU addr: 0x%04lx\n", addr);
+ return -1;
+ }
+ return ret;
+}
+
+static void mcu_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+#ifndef CONFIG_KVM
+#ifdef CONFIG_DUMP_PRINTK
+    uint64_t print_addr;
+    uint32_t len;
+    int i;
+
+    /* Debug doorbell: the guest hands us {len:32 | guest phys addr:31}
+     * and we dump that many bytes of guest memory to stdout. */
+    if (addr == 0x40000) {
+        print_addr = val & 0x7fffffff;
+        len = (uint32_t)(val >> 32);
+        uint8_t *buf;
+        /* g_malloc0 zero-fills and aborts on OOM, unlike the raw
+         * malloc/memset pair it replaces, which was used unchecked. */
+        buf = g_malloc0(len + 10);
+        cpu_physical_memory_rw(print_addr, buf, len, 0);
+        for (i = 0; i < len; i++)
+            printf("%c", buf[i]);
+
+        g_free(buf);
+        return;
+    }
+#endif
+#endif
+}
+
+static const MemoryRegionOps mcu_ops = {
+ .read = mcu_read,
+ .write = mcu_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid =
+ {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl =
+ {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+static uint64_t intpu_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t ret = 0;
+#ifndef CONFIG_KVM
+ switch (addr) {
+ case 0x180:
+ /* LONGTIME */
+ ret = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 32;
+ break;
+ }
+#endif
+ return ret;
+}
+
+static void intpu_write(void *opaque, hwaddr addr, uint64_t val,
+                        unsigned size)
+{
+#ifndef CONFIG_KVM
+    BoardState *bs = (BoardState *)opaque;
+    SW64CPU *cpu;
+    switch (addr) {
+    case 0x00:
+        /* Inter-processor "mail" doorbell: low bits select the target
+         * vCPU.  The value is guest-controlled, so bound it to the cpu[]
+         * array and skip slots that were never populated (the old code
+         * indexed with val & 0x1f unchecked). */
+        val &= 0x1f;
+        if (val >= MAX_CPUS_CORE3)
+            break;
+        cpu = bs->sboard.cpu[val];
+        if (cpu == NULL)
+            break;
+        cpu->env.csr[II_REQ] = 0x100000;
+        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_IIMAIL);
+        break;
+    default:
+        fprintf(stderr, "Unsupported IPU addr: 0x%04lx\n", addr);
+        break;
+    }
+#endif
+}
+
+static const MemoryRegionOps intpu_ops = {
+ .read = intpu_read,
+ .write = intpu_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid =
+ {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl =
+ {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+static MemTxResult msi_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ return MEMTX_OK;
+}
+
+MemTxResult msi_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+#ifdef CONFIG_KVM
+ int ret = 0;
+ MSIMessage msg = {};
+
+ msg.address = (uint64_t) addr + 0x8000fee00000;
+ msg.data = (uint32_t) value;
+
+ ret = kvm_irqchip_send_msi(kvm_state, msg);
+ if (ret < 0) {
+ fprintf(stderr, "KVM: injection failed, MSI lost (%s)\n",
+ strerror(-ret));
+ }
+#endif
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps msi_ops = {
+ .read_with_attrs = msi_read,
+ .write_with_attrs = msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid =
+ {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl =
+ {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size)
+{
+ return 1;
+}
+
+static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size)
+{
+}
+
+const MemoryRegionOps core3_pci_ignore_ops = {
+ .read = ignore_read,
+ .write = ignore_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid =
+ {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl =
+ {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static uint64_t config_read(void *opaque, hwaddr addr, unsigned size)
+{
+    /* Fold the SW64 config offset (bus/dev/fn in bits 16..31, register
+     * in bits 0..7) into a legacy config-cycle address. */
+    PCIBus *bus = opaque;
+    uint32_t cfg_addr = (((addr >> 16) & 0xffff) << 8) | (addr & 0xff);
+
+    return pci_data_read(bus, cfg_addr, size);
+}
+
+static void config_write(void *opaque, hwaddr addr, uint64_t val,
+                         unsigned size)
+{
+    /* Same address folding as config_read, for the write path. */
+    PCIBus *bus = opaque;
+    uint32_t cfg_addr = (((addr >> 16) & 0xffff) << 8) | (addr & 0xff);
+
+    pci_data_write(bus, cfg_addr, val, size);
+}
+
+const MemoryRegionOps core3_pci_config_ops = {
+ .read = config_read,
+ .write = config_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid =
+ {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl =
+ {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+static void cpu_irq_change(SW64CPU *cpu, uint64_t req)
+{
+    /* Raise (req != 0) or clear the hard interrupt line on @cpu.
+     * A NULL cpu slot is silently ignored. */
+    CPUState *cs;
+
+    if (cpu == NULL) {
+        return;
+    }
+    cs = CPU(cpu);
+    if (req) {
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+    }
+}
+
+static void swboard_set_irq(void *opaque, int irq, int level)
+{
+ BoardState *bs = opaque;
+ SW64CPU *cpu;
+ int i;
+
+ if (kvm_enabled()) {
+ if (level == 0)
+ return;
+ kvm_set_irq(kvm_state, irq, level);
+ return;
+ }
+
+ for (i = 0; i < 1; i++) {
+ cpu = bs->sboard.cpu[i];
+ if (cpu != NULL) {
+ CPUState *cs = CPU(cpu);
+ if (level)
+ cpu_interrupt(cs, CPU_INTERRUPT_PCIE);
+ else
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_PCIE);
+ }
+ }
+}
+
+static int swboard_map_irq(PCIDevice *d, int irq_num)
+{
+ /* In fact,the return value is the interrupt type passed to kernel,
+ * so it must keep same with the type in do_entInt in kernel.
+ */
+ return 16;
+}
+
+static void serial_set_irq(void *opaque, int irq, int level)
+{
+ BoardState *bs = (BoardState *)opaque;
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int smp_cpus = ms->smp.cpus;
+ int i;
+ if (level == 0)
+ return;
+ if (kvm_enabled()) {
+ kvm_set_irq(kvm_state, irq, level);
+ return;
+ }
+ for (i = 0; i < smp_cpus; i++) {
+ if (bs->sboard.cpu[i])
+ cpu_irq_change(bs->sboard.cpu[i], 1);
+ }
+}
+
+void core3_board_init(SW64CPU *cpus[MAX_CPUS], MemoryRegion *ram)
+{
+ DeviceState *dev;
+ BoardState *bs;
+#ifndef CONFIG_KVM
+ TimerState *ts;
+#endif
+ MemoryRegion *io_mcu = g_new(MemoryRegion, 1);
+ MemoryRegion *io_intpu = g_new(MemoryRegion, 1);
+ MemoryRegion *msi_ep = g_new(MemoryRegion, 1);
+ qemu_irq serial_irq;
+ uint64_t MB = 1024 * 1024;
+ MemoryRegion *mem_ep = g_new(MemoryRegion, 1);
+ MemoryRegion *mem_ep64 = g_new(MemoryRegion, 1);
+ MemoryRegion *conf_piu0 = g_new(MemoryRegion, 1);
+ MemoryRegion *io_ep = g_new(MemoryRegion, 1);
+
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int smp_cpus = ms->smp.cpus;
+
+ PCIBus *b;
+ PCIHostState *phb;
+ uint64_t GB = 1024 * MB;
+
+ int i;
+ dev = qdev_new(TYPE_SWBOARD_PCI_HOST_BRIDGE);
+ phb = PCI_HOST_BRIDGE(dev);
+ bs = SWBOARD_PCI_HOST_BRIDGE(dev);
+
+#ifdef CONFIG_KVM
+ if (kvm_has_gsi_routing())
+ msi_nonbroken = true;
+#endif
+
+ for (i = 0; i < smp_cpus; ++i) {
+ if (cpus[i] == NULL)
+ continue;
+ bs->sboard.cpu[i] = cpus[i];
+#ifndef CONFIG_KVM
+ ts = g_new(TimerState, 1);
+ ts->opaque = (void *) ((uintptr_t)bs);
+ ts->order = i;
+ cpus[i]->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &swboard_alarm_timer, ts);
+#endif
+ }
+ memory_region_add_subregion(get_system_memory(), 0, ram);
+
+ memory_region_init_io(io_mcu, NULL, &mcu_ops, bs, "io_mcu", 16 * MB);
+ memory_region_add_subregion(get_system_memory(), 0x803000000000ULL, io_mcu);
+
+ memory_region_init_io(io_intpu, NULL, &intpu_ops, bs, "io_intpu", 1 * MB);
+ memory_region_add_subregion(get_system_memory(), 0x802a00000000ULL,
+ io_intpu);
+
+ memory_region_init_io(msi_ep, NULL, &msi_ops, bs, "msi_ep", 1 * MB);
+ memory_region_add_subregion(get_system_memory(), 0x8000fee00000ULL, msi_ep);
+
+ memory_region_init(mem_ep, OBJECT(bs), "pci0-mem", 0x890000000000ULL);
+ memory_region_add_subregion(get_system_memory(), 0x880000000000ULL, mem_ep);
+
+ memory_region_init_alias(mem_ep64, NULL, "mem_ep64", mem_ep, 0x888000000000ULL, 1ULL << 39);
+ memory_region_add_subregion(get_system_memory(), 0x888000000000ULL, mem_ep64);
+
+ memory_region_init_io(io_ep, OBJECT(bs), &core3_pci_ignore_ops, NULL,
+ "pci0-io-ep", 4 * GB);
+
+ memory_region_add_subregion(get_system_memory(), 0x880100000000ULL, io_ep);
+ b = pci_register_root_bus(dev, "pcie.0", swboard_set_irq, swboard_map_irq, bs,
+ mem_ep, io_ep, 0, 537, TYPE_PCIE_BUS);
+ phb->bus = b;
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ pci_bus_set_route_irq_fn(b, sw_route_intx_pin_to_irq);
+ memory_region_init_io(conf_piu0, OBJECT(bs), &core3_pci_config_ops, b,
+ "pci0-ep-conf-io", 4 * GB);
+ memory_region_add_subregion(get_system_memory(), 0x880600000000ULL,
+ conf_piu0);
+#ifdef SW64_VT_IOMMU
+ sw64_vt_iommu_init(b);
+#endif
+ for (i = 0; i < nb_nics; i++) {
+ pci_nic_init_nofail(&nd_table[i], b, "e1000", NULL);
+ }
+
+ pci_vga_init(b);
+#define MAX_SATA_PORTS 6
+ PCIDevice *ahci;
+ DriveInfo *hd[MAX_SATA_PORTS];
+ ahci = pci_create_simple_multifunction(b, PCI_DEVFN(0x1f, 0), true,
+ TYPE_ICH9_AHCI);
+ g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci));
+ ide_drive_get(hd, ahci_get_num_ports(ahci));
+ ahci_ide_create_devs(ahci, hd);
+
+ serial_irq = qemu_allocate_irq(serial_set_irq, bs, 12);
+ if (serial_hd(0)) {
+ serial_mm_init(get_system_memory(), 0x3F8 + 0x880100000000ULL, 0,
+ serial_irq, (1843200 >> 4), serial_hd(0),
+ DEVICE_LITTLE_ENDIAN);
+ }
+ pci_create_simple(phb->bus, -1, "nec-usb-xhci");
+ sun4v_rtc_init(0x804910000000ULL);
+}
+
+static const TypeInfo swboard_pcihost_info = {
+ .name = TYPE_SWBOARD_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(BoardState),
+};
+
+static void swboard_register_types(void)
+{
+ type_register_static(&swboard_pcihost_info);
+}
+
+type_init(swboard_register_types)
diff --git a/hw/sw64/meson.build b/hw/sw64/meson.build
new file mode 100644
index 0000000000..8abb18222a
--- /dev/null
+++ b/hw/sw64/meson.build
@@ -0,0 +1,10 @@
+sw64_ss = ss.source_set()
+
+sw64_ss.add(files('sw64_iommu.c'))
+
+sw64_ss.add(when: 'CONFIG_CORE3', if_true: files(
+ 'core3.c',
+ 'core3_board.c',
+))
+
+hw_arch += {'sw64': sw64_ss}
diff --git a/hw/sw64/sw64_iommu.c b/hw/sw64/sw64_iommu.c
new file mode 100644
index 0000000000..8ded65f213
--- /dev/null
+++ b/hw/sw64/sw64_iommu.c
@@ -0,0 +1,567 @@
+/*
+ * QEMU sw64 IOMMU emulation
+ *
+ * Copyright (c) 2021 Lu Feifei
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "exec/address-spaces.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "hw/sw64/sw64_iommu.h"
+#include "sysemu/kvm.h"
+
+#define IOMMU_PAGE_SHIFT 13
+#define IOMMU_PAGE_SIZE_8K (1ULL << IOMMU_PAGE_SHIFT)
+#define IOMMU_PAGE_MASK_8K (~(IOMMU_PAGE_SIZE_8K - 1))
+#define IOMMU_IOVA_SHIFT 16
+#define SW64IOMMU_PTIOTLB_MAX_SIZE 256
+
+static MemTxResult swvt_msi_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size, MemTxAttrs attrs)
+{
+ return MEMTX_OK;
+}
+
+static MemTxResult swvt_msi_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ MemTxResult ret;
+
+ ret = msi_write(opaque, addr, value, size, attrs);
+
+ return ret;
+}
+
+static const MemoryRegionOps swvt_msi_ops = {
+ .read_with_attrs = swvt_msi_read,
+ .write_with_attrs = swvt_msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+SWVTAddressSpace *iommu_find_add_as(SW64IOMMUState *s, PCIBus *bus, int devfn)
+{
+ uintptr_t key = (uintptr_t)bus;
+ SWVTBus *swvt_bus = g_hash_table_lookup(s->swvtbus_as_by_busptr, &key);
+ SWVTAddressSpace *swvt_dev_as;
+ char name[128];
+
+ if (!swvt_bus) {
+ uintptr_t *new_key = g_malloc(sizeof(*new_key));
+ *new_key = (uintptr_t)bus;
+ /* No corresponding free() */
+ swvt_bus = g_malloc0(sizeof(SWVTBus) + sizeof(SWVTAddressSpace *) * \
+ PCI_DEVFN_MAX);
+ swvt_bus->bus = bus;
+ g_hash_table_insert(s->swvtbus_as_by_busptr, new_key, swvt_bus);
+ }
+ swvt_dev_as = swvt_bus->dev_as[devfn];
+ if (!swvt_dev_as) {
+ snprintf(name, sizeof(name), "sw64_iommu_devfn_%d", devfn);
+ swvt_bus->dev_as[devfn] = swvt_dev_as = g_malloc0(sizeof(SWVTAddressSpace));
+
+ swvt_dev_as->bus = bus;
+ swvt_dev_as->devfn = (uint8_t)devfn;
+ swvt_dev_as->iommu_state = s;
+
+ memory_region_init_iommu(&swvt_dev_as->iommu, sizeof(swvt_dev_as->iommu),
+ TYPE_SW64_IOMMU_MEMORY_REGION, OBJECT(s),
+ "sw64_iommu_dmar",
+ 1UL << 32);
+ memory_region_init_io(&swvt_dev_as->msi, OBJECT(s),
+ &swvt_msi_ops, s, "sw_msi", 1 * 1024 * 1024);
+ memory_region_init(&swvt_dev_as->root, OBJECT(s),
+ "swvt_root", UINT64_MAX);
+ memory_region_add_subregion_overlap(&swvt_dev_as->root,
+ 0x8000fee00000ULL,
+ &swvt_dev_as->msi, 64);
+ address_space_init(&swvt_dev_as->as, &swvt_dev_as->root, name);
+ memory_region_add_subregion_overlap(&swvt_dev_as->root, 0,
+ MEMORY_REGION(&swvt_dev_as->iommu),
+ 1);
+ }
+
+ memory_region_set_enabled(MEMORY_REGION(&swvt_dev_as->iommu), true);
+
+ return swvt_dev_as;
+}
+
+/**
+ * get_pte - Read the 8-byte table entry at @baseaddr into @pte.
+ *
+ * Returns 0 on success, -EINVAL when the DMA read fails.
+ */
+static int get_pte(dma_addr_t baseaddr, uint64_t *pte)
+{
+    /* TODO: guarantee 64-bit single-copy atomicity */
+    if (dma_memory_read(&address_space_memory, baseaddr,
+                        (uint8_t *)pte, sizeof(*pte)) != MEMTX_OK) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int swvt_do_iommu_translate(SWVTAddressSpace *swvt_as, PCIBus *bus,
+                                   uint8_t devfn, hwaddr addr, IOMMUTLBEntry *entry)
+{
+    SW64IOMMUState *s = swvt_as->iommu_state;
+    uint8_t bus_num = pci_bus_num(bus);
+    unsigned long dtbbaseaddr, dtbbasecond;
+    unsigned long pdebaseaddr, ptebaseaddr;
+    unsigned long pte;
+    uint16_t source_id;
+    SW64DTIOTLBEntry *dtcached_entry = NULL;
+    SW64DTIOTLBKey dtkey, *new_key;
+
+    /* The device-table cache is keyed on source id: fill the key before
+     * the lookup (it was previously read uninitialized).  Return type is
+     * now int: the old "bool" silently truncated the -EINVAL error code. */
+    source_id = ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
+    dtkey.source_id = source_id;
+    dtcached_entry = g_hash_table_lookup(s->dtiotlb, &dtkey);
+
+    if (unlikely(!dtcached_entry)) {
+        /* Two-level device table: bus number indexes level one, devfn
+         * indexes level two; entries are 8 bytes each. */
+        dtbbaseaddr = s->dtbr + (bus_num << 3);
+
+        if (get_pte(dtbbaseaddr, &pte))
+            goto error;
+
+        dtbbasecond = (pte & (~(SW_IOMMU_ENTRY_VALID))) + (devfn << 3);
+        if (get_pte(dtbbasecond, &pte))
+            goto error;
+
+        dtcached_entry = g_new0(SW64DTIOTLBEntry, 1);
+        dtcached_entry->ptbase_addr = pte & (~(SW_IOMMU_ENTRY_VALID));
+        dtcached_entry->source_id = source_id;
+
+        new_key = g_new0(SW64DTIOTLBKey, 1);
+        new_key->source_id = source_id;
+
+        g_hash_table_insert(s->dtiotlb, new_key, dtcached_entry);
+    }
+
+    /* Two-level page-table walk: bits above 23 select the level-1 PDE,
+     * bits above the 8K page shift select the level-2 PTE. */
+    pdebaseaddr = dtcached_entry->ptbase_addr;
+    pdebaseaddr += ((addr >> 23) & SW_IOMMU_LEVEL1_OFFSET) << 3;
+
+    if (get_pte(pdebaseaddr, &pte))
+        goto error;
+
+    ptebaseaddr = pte & (~(SW_IOMMU_ENTRY_VALID));
+    ptebaseaddr += ((addr >> IOMMU_PAGE_SHIFT) & SW_IOMMU_LEVEL2_OFFSET) << 3;
+
+    if (get_pte(ptebaseaddr, &pte))
+        goto error;
+
+    pte &= ~(SW_IOMMU_ENTRY_VALID | SW_IOMMU_GRN | SW_IOMMU_ENABLE);
+    entry->translated_addr = pte;
+    entry->addr_mask = IOMMU_PAGE_SIZE_8K - 1;
+
+    return 0;
+
+error:
+    entry->perm = IOMMU_NONE;
+    return -EINVAL;
+}
+
+static void swvt_ptiotlb_inv_all(SW64IOMMUState *s)
+{
+ g_hash_table_remove_all(s->ptiotlb);
+}
+
+static IOMMUTLBEntry *swvt_lookup_ptiotlb(SW64IOMMUState *s, uint16_t source_id,
+                                          hwaddr addr)
+{
+    SW64PTIOTLBKey ptkey;
+
+    ptkey.source_id = source_id;
+    ptkey.iova = addr;
+
+    /* Return the cached translation, or NULL on a miss.  The previous
+     * out-parameter version assigned the result to a by-value pointer,
+     * so the caller could never observe a hit. */
+    return g_hash_table_lookup(s->ptiotlb, &ptkey);
+}
+
+static IOMMUTLBEntry sw64_translate_iommu(IOMMUMemoryRegion *iommu, hwaddr addr,
+                                          IOMMUAccessFlags flag, int iommu_idx)
+{
+    SWVTAddressSpace *swvt_as = container_of(iommu, SWVTAddressSpace, iommu);
+    SW64IOMMUState *s = swvt_as->iommu_state;
+    IOMMUTLBEntry *cached_entry = NULL;
+    IOMMUTLBEntry entry = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = addr,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE,
+    };
+    uint8_t bus_num = pci_bus_num(swvt_as->bus);
+    uint16_t source_id;
+    SW64PTIOTLBKey *new_ptkey;
+    hwaddr aligned_addr;
+
+    source_id = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL);
+
+    qemu_mutex_lock(&s->iommu_lock);
+
+    aligned_addr = addr & IOMMU_PAGE_MASK_8K;
+
+    /* PTIOTLB hit: reuse the cached translation.  (The old call passed
+     * source_id/addr in swapped order and discarded the result.) */
+    cached_entry = swvt_lookup_ptiotlb(s, source_id, aligned_addr);
+    if (cached_entry)
+        goto out;
+
+    /* Crude capacity cap: flush everything instead of evicting. */
+    if (g_hash_table_size(s->ptiotlb) >= SW64IOMMU_PTIOTLB_MAX_SIZE) {
+        swvt_ptiotlb_inv_all(s);
+    }
+
+    cached_entry = g_new0(IOMMUTLBEntry, 1);
+
+    if (swvt_do_iommu_translate(swvt_as, swvt_as->bus, swvt_as->devfn,
+                                addr, cached_entry)) {
+        g_free(cached_entry);
+        qemu_mutex_unlock(&s->iommu_lock);
+        printf("%s: detected translation failure "
+               "(busnum=%d, devfn=%#x, iova=%#lx.\n",
+               __func__, pci_bus_num(swvt_as->bus), swvt_as->devfn,
+               entry.iova);
+        entry.iova = 0;
+        entry.translated_addr = 0;
+        entry.addr_mask = 0;
+        entry.perm = IOMMU_NONE;
+
+        return entry;
+    } else {
+        new_ptkey = g_new0(SW64PTIOTLBKey, 1);
+        new_ptkey->source_id = source_id;
+        new_ptkey->iova = aligned_addr;
+        g_hash_table_insert(s->ptiotlb, new_ptkey, cached_entry);
+    }
+
+out:
+    qemu_mutex_unlock(&s->iommu_lock);
+    entry.perm = flag;
+    entry.translated_addr = cached_entry->translated_addr +
+                            (addr & (IOMMU_PAGE_SIZE_8K - 1));
+    entry.addr_mask = cached_entry->addr_mask;
+
+    return entry;
+}
+
+static void swvt_ptiotlb_inv_iova(SW64IOMMUState *s, uint16_t source_id, dma_addr_t iova)
+{
+ SW64PTIOTLBKey key = {.source_id = source_id, .iova = iova};
+
+ qemu_mutex_lock(&s->iommu_lock);
+ g_hash_table_remove(s->ptiotlb, &key);
+ qemu_mutex_unlock(&s->iommu_lock);
+}
+
+void swvt_address_space_unmap_iova(SW64IOMMUState *s, unsigned long val)
+{
+ SWVTAddressSpace *swvt_as;
+ IOMMUNotifier *n;
+ uint16_t source_id;
+ dma_addr_t iova;
+ IOMMUTLBEvent event;
+
+ source_id = val & 0xffff;
+ iova = (val >> IOMMU_IOVA_SHIFT) << IOMMU_PAGE_SHIFT;
+
+ swvt_ptiotlb_inv_iova(s, source_id, iova);
+
+ QLIST_FOREACH(swvt_as, &s->swvt_as_with_notifiers, next) {
+ uint8_t bus_num = pci_bus_num(swvt_as->bus);
+ uint16_t as_sourceid = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL);
+
+ if (as_sourceid == source_id) {
+ IOMMU_NOTIFIER_FOREACH(n, &swvt_as->iommu) {
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.target_as = &address_space_memory;
+ event.entry.iova = iova & IOMMU_PAGE_MASK_8K;
+ event.entry.translated_addr = 0;
+ event.entry.perm = IOMMU_NONE;
+ event.entry.addr_mask = IOMMU_PAGE_SIZE_8K - 1;
+
+ memory_region_notify_iommu(&swvt_as->iommu, 0, event);
+ }
+ }
+ }
+}
+
+/* Unmap the whole range in the notifier's scope with one wide
+ * UNMAP notification. */
+static void swvt_address_space_unmap(SWVTAddressSpace *as, IOMMUNotifier *n)
+{
+    IOMMUTLBEvent event;
+    hwaddr size;
+    hwaddr start = n->start;
+    hwaddr end = n->end;
+
+    assert(start <= end);
+    /* NOTE(review): notifier ranges are inclusive elsewhere in QEMU, so
+     * "end - start" may be one byte short of the true span -- confirm. */
+    size = end - start;
+
+    /* The event type was previously left uninitialized. */
+    event.type = IOMMU_NOTIFIER_UNMAP;
+    event.entry.target_as = &address_space_memory;
+    /* Adjust iova for the size */
+    event.entry.iova = n->start & ~(size - 1);
+    /* This field is meaningless for unmap */
+    event.entry.translated_addr = 0;
+    event.entry.perm = IOMMU_NONE;
+    event.entry.addr_mask = size - 1;
+
+    memory_region_notify_iommu_one(n, &event);
+}
+
+void swvt_address_space_map_iova(SW64IOMMUState *s, unsigned long val)
+{
+    SWVTAddressSpace *swvt_as;
+    IOMMUNotifier *n;
+    uint16_t source_id;
+    dma_addr_t iova;
+    IOMMUTLBEvent event;
+    int ret;
+
+    /* Doorbell value encodes {iova >> 16 : upper bits, source id : 16}. */
+    source_id = val & 0xffff;
+    iova = (val >> IOMMU_IOVA_SHIFT) << IOMMU_PAGE_SHIFT;
+
+    /* Drop any stale cached translation before re-resolving it. */
+    swvt_ptiotlb_inv_iova(s, source_id, iova);
+
+    QLIST_FOREACH(swvt_as, &s->swvt_as_with_notifiers, next) {
+        uint8_t bus_num = pci_bus_num(swvt_as->bus);
+        uint16_t as_sourceid = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL);
+
+        if (as_sourceid == source_id) {
+            IOMMU_NOTIFIER_FOREACH(n, &swvt_as->iommu) {
+                /* This is a map notification; the type was previously
+                 * (wrongly) IOMMU_NOTIFIER_UNMAP while perm is IOMMU_RW. */
+                event.type = IOMMU_NOTIFIER_MAP;
+                event.entry.target_as = &address_space_memory;
+                event.entry.iova = iova & IOMMU_PAGE_MASK_8K;
+                event.entry.perm = IOMMU_RW;
+
+                ret = swvt_do_iommu_translate(swvt_as, swvt_as->bus,
+                                              swvt_as->devfn, iova, &event.entry);
+                if (ret)
+                    goto out;
+
+                memory_region_notify_iommu(&swvt_as->iommu, 0, event);
+            }
+        }
+    }
+out:
+    return;
+}
+
+void swvt_address_space_invalidate_iova(SW64IOMMUState *s, unsigned long val)
+{
+    /* Bit 36 of the doorbell distinguishes a map request from an unmap;
+     * the low 36 bits carry the encoded {iova, source id}. */
+    if (val >> 36) {
+        swvt_address_space_map_iova(s, val & 0xfffffffff);
+    } else {
+        swvt_address_space_unmap_iova(s, val);
+    }
+}
+
+static AddressSpace *sw64_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+ SW64IOMMUState *s = opaque;
+ SWVTAddressSpace *swvt_as;
+
+ assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
+
+ swvt_as = iommu_find_add_as(s, bus, devfn);
+ return &swvt_as->as;
+}
+
+static uint64_t piu0_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t ret = 0;
+ switch (addr) {
+ default:
+ break;
+ }
+ return ret;
+}
+
+static void piu0_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ SW64IOMMUState *s = (SW64IOMMUState *)opaque;
+
+ switch (addr) {
+ case 0xb000:
+ /* DTBaseAddr */
+ s->dtbr = val;
+ break;
+ case 0xb280:
+ /* PTLB_FlushVAddr */
+ swvt_address_space_invalidate_iova(s, val);
+ break;
+ default:
+ break;
+ }
+}
+
+const MemoryRegionOps core3_pci_piu0_ops = {
+ .read = piu0_read,
+ .write = piu0_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ },
+};
+
+void sw64_vt_iommu_init(PCIBus *b)
+{
+ DeviceState *dev_iommu;
+ SW64IOMMUState *s;
+ MemoryRegion *io_piu0 = g_new(MemoryRegion, 1);
+
+ dev_iommu = qdev_new(TYPE_SW64_IOMMU);
+ s = SW64_IOMMU(dev_iommu);
+
+ s->pci_bus = b;
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev_iommu), &error_fatal);
+
+ pci_setup_iommu(b, sw64_dma_iommu, dev_iommu);
+
+ memory_region_init_io(io_piu0, OBJECT(s), &core3_pci_piu0_ops, s,
+ "pci0-piu0-io", 4 * 1024 * 1024);
+ memory_region_add_subregion(get_system_memory(), 0x880200000000ULL,
+ io_piu0);
+}
+
+static int swvt_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                          IOMMUNotifierFlag old,
+                                          IOMMUNotifierFlag new,
+                                          Error **errp)
+{
+    SWVTAddressSpace *swvt_as = container_of(iommu, SWVTAddressSpace, iommu);
+    SW64IOMMUState *s = swvt_as->iommu_state;
+
+    /* Reject unsupported flags before touching any state, so a failed
+     * registration leaves notifier_flags and the list untouched (the
+     * old code updated notifier_flags first and never rolled it back). */
+    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
+        error_setg(errp, "swvt does not support dev-iotlb yet");
+        return -EINVAL;
+    }
+
+    /* Update per-address-space notifier flags */
+    swvt_as->notifier_flags = new;
+
+    if (old == IOMMU_NOTIFIER_NONE) {
+        QLIST_INSERT_HEAD(&s->swvt_as_with_notifiers, swvt_as, next);
+    } else if (new == IOMMU_NOTIFIER_NONE) {
+        QLIST_REMOVE(swvt_as, next);
+    }
+    return 0;
+}
+
+static void swvt_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
+{
+ SWVTAddressSpace *swvt_as = container_of(iommu_mr, SWVTAddressSpace, iommu);
+
+ /*
+ * The replay can be triggered by either a invalidation or a newly
+ * created entry. No matter what, we release existing mappings
+ * (it means flushing caches for UNMAP-only registers).
+ */
+ swvt_address_space_unmap(swvt_as, n);
+}
+
+/* GHashTable functions */
+/* NOTE(review): both IOTLB tables key on structs (SW64DTIOTLBKey holds a
+ * single uint16_t) yet these callbacks read a full uint64_t through the
+ * key pointer -- that touches padding/out-of-bounds bytes unless every
+ * key is allocated zero-filled at >= 8 bytes; confirm against the g_new0
+ * allocations in this file. */
+static gboolean swvt_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
+}
+
+static guint swvt_uint64_hash(gconstpointer v)
+{
+    return (guint)*(const uint64_t *)v;
+}
+
+static void iommu_realize(DeviceState *d, Error **errp)
+{
+ SW64IOMMUState *s = SW64_IOMMU(d);
+
+ QLIST_INIT(&s->swvt_as_with_notifiers);
+ qemu_mutex_init(&s->iommu_lock);
+
+ s->dtiotlb = g_hash_table_new_full(swvt_uint64_hash, swvt_uint64_equal,
+ g_free, g_free);
+ s->ptiotlb = g_hash_table_new_full(swvt_uint64_hash, swvt_uint64_equal,
+ g_free, g_free);
+
+ s->swvtbus_as_by_busptr = g_hash_table_new(NULL, NULL);
+}
+
+static void iommu_reset(DeviceState *d)
+{
+}
+
+static void sw64_iommu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = iommu_reset;
+ dc->realize = iommu_realize;
+}
+
+static void sw64_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = sw64_translate_iommu;
+ imrc->notify_flag_changed = swvt_iommu_notify_flag_changed;
+ imrc->replay = swvt_iommu_replay;
+}
+
+static const TypeInfo sw64_iommu_info = {
+ .name = TYPE_SW64_IOMMU,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SW64IOMMUState),
+ .class_init = sw64_iommu_class_init,
+ .class_size = sizeof(SW64IOMMUClass),
+};
+
+static const TypeInfo sw64_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_SW64_IOMMU_MEMORY_REGION,
+ .class_init = sw64_iommu_memory_region_class_init,
+};
+
+static void sw64_iommu_register_types(void)
+{
+ type_register_static(&sw64_iommu_info);
+ type_register_static(&sw64_iommu_memory_region_info);
+}
+
+type_init(sw64_iommu_register_types)
diff --git a/hw/sw64/trace-events b/hw/sw64/trace-events
new file mode 100644
index 0000000000..1aa744c984
--- /dev/null
+++ b/hw/sw64/trace-events
@@ -0,0 +1,3 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# pci.c
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
index 08e1beec85..4590bcc968 100644
--- a/include/disas/dis-asm.h
+++ b/include/disas/dis-asm.h
@@ -191,6 +191,9 @@ enum bfd_architecture
#define bfd_mach_alpha_ev4 0x10
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30
+ bfd_arch_sw_64, /* Dec Sw_64 */
+#define bfd_mach_sw_64 1
+#define bfd_mach_sw_64_core3 1621
bfd_arch_arm, /* Advanced Risc Machines ARM */
#define bfd_mach_arm_unknown 0
#define bfd_mach_arm_2 1
@@ -429,6 +432,7 @@ int print_insn_h8500 (bfd_vma, disassemble_info*);
int print_insn_arm_a64 (bfd_vma, disassemble_info*);
int print_insn_alpha (bfd_vma, disassemble_info*);
disassembler_ftype arc_get_disassembler (int, int);
+int print_insn_sw_64 (bfd_vma, disassemble_info*);
int print_insn_arm (bfd_vma, disassemble_info*);
int print_insn_sparc (bfd_vma, disassemble_info*);
int print_insn_big_a29k (bfd_vma, disassemble_info*);
diff --git a/include/elf.h b/include/elf.h
index 811bf4a1cb..79c188b62f 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -207,6 +207,8 @@ typedef struct mips_elf_abiflags_v0 {
#define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */
+#define EM_SW64 0x9916 /* SW64 */
+
/* This is the info that is needed to parse the dynamic section of the file */
#define DT_NULL 0
#define DT_NEEDED 1
@@ -1417,6 +1419,48 @@ typedef struct {
#define EF_RISCV_RVE 0x0008
#define EF_RISCV_TSO 0x0010
+/*
+ SW_64 ELF relocation types
+ */
+#define EM_SW_64 0x9916
+#define R_SW_64_NONE 0 /* No reloc */
+#define R_SW_64_REFLONG 1 /* Direct 32 bit */
+#define R_SW_64_REFQUAD 2 /* Direct 64 bit */
+#define R_SW_64_GPREL32 3 /* GP relative 32 bit */
+#define R_SW_64_LITERAL 4 /* GP relative 16 bit w/optimization */
+#define R_SW_64_LITUSE 5 /* Optimization hint for LITERAL */
+#define R_SW_64_GPDISP 6 /* Add displacement to GP */
+#define R_SW_64_BRADDR 7 /* PC+4 relative 23 bit shifted */
+#define R_SW_64_HINT 8 /* PC+4 relative 16 bit shifted */
+#define R_SW_64_SREL16 9 /* PC relative 16 bit */
+#define R_SW_64_SREL32 10 /* PC relative 32 bit */
+#define R_SW_64_SREL64 11 /* PC relative 64 bit */
+#define R_SW_64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */
+#define R_SW_64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */
+#define R_SW_64_GPREL16 19 /* GP relative 16 bit */
+#define R_SW_64_COPY 24 /* Copy symbol at runtime */
+#define R_SW_64_GLOB_DAT 25 /* Create GOT entry */
+#define R_SW_64_JMP_SLOT 26 /* Create PLT entry */
+#define R_SW_64_RELATIVE 27 /* Adjust by program base */
+#define R_SW_64_TLS_GD_HI 28
+#define R_SW_64_TLSGD 29
+#define R_SW_64_TLS_LDM 30
+#define R_SW_64_DTPMOD64 31
+#define R_SW_64_GOTDTPREL 32
+#define R_SW_64_DTPREL64 33
+#define R_SW_64_DTPRELHI 34
+#define R_SW_64_DTPRELLO 35
+#define R_SW_64_DTPREL16 36
+#define R_SW_64_GOTTPREL 37
+#define R_SW_64_TPREL64 38
+#define R_SW_64_TPRELHI 39
+#define R_SW_64_TPRELLO 40
+#define R_SW_64_TPREL16 41
+/* Keep this the last entry. */
+#define R_SW_64_NUM 46
+/* Legal values for sh_flags field of Elf64_Shdr. */
+#define SHF_SW_64_GPREL 0x10000000
+
typedef struct elf32_rel {
Elf32_Addr r_offset;
Elf32_Word r_info;
diff --git a/include/hw/sw64/sw64_iommu.h b/include/hw/sw64/sw64_iommu.h
new file mode 100644
index 0000000000..7191876083
--- /dev/null
+++ b/include/hw/sw64/sw64_iommu.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2021-2025 Wuxi Institute of Advanced Technology
+ * Written by Lu Feifei
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_SW64_IOMMU_H
+#define HW_SW64_IOMMU_H
+
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+
+#define TYPE_SW64_IOMMU_MEMORY_REGION "sw64-iommu-memory-region"
+#define SW_IOMMU_ENTRY_VALID ((1UL) << 63)
+#define SW_IOMMU_LEVEL1_OFFSET 0x1ff
+#define SW_IOMMU_LEVEL2_OFFSET 0x3ff
+#define SW_IOMMU_ENABLE 3
+#define SW_IOMMU_GRN ((0UL) << 4)
+#define SWVT_PCI_BUS_MAX 256
+
+typedef struct SW64IOMMUClass SW64IOMMUClass;
+typedef struct SW64IOMMUState SW64IOMMUState;
+typedef struct SWVTAddressSpace SWVTAddressSpace;
+typedef struct SW64DTIOTLBKey SW64DTIOTLBKey;
+typedef struct SW64PTIOTLBKey SW64PTIOTLBKey;
+typedef struct SW64DTIOTLBEntry SW64DTIOTLBEntry;
+typedef struct SWVTBus SWVTBus;
+
+struct SW64DTIOTLBEntry {
+ uint16_t source_id;
+ unsigned long ptbase_addr;
+};
+
+struct SW64DTIOTLBKey {
+ uint16_t source_id;
+};
+
+struct SW64PTIOTLBKey {
+ uint16_t source_id;
+ dma_addr_t iova;
+};
+
+struct SWVTAddressSpace {
+ PCIBus *bus;
+ uint8_t devfn;
+ AddressSpace as;
+ IOMMUMemoryRegion iommu;
+ MemoryRegion root;
+ MemoryRegion msi; /* Interrupt region: 0xfeeXXXXX */
+ SW64IOMMUState *iommu_state;
+ QLIST_ENTRY(SWVTAddressSpace) next;
+ /* Superset of notifier flags that this address space has */
+ IOMMUNotifierFlag notifier_flags;
+};
+
+struct SWVTBus {
+ PCIBus* bus; /* A reference to the bus to provide translation for */
+ SWVTAddressSpace *dev_as[0]; /* A table of SWVTAddressSpace objects indexed by devfn */
+};
+
+struct SW64IOMMUState {
+ SysBusDevice busdev;
+ dma_addr_t dtbr; /* Current root table pointer */
+ GHashTable *dtiotlb; /* IOTLB for device table */
+ GHashTable *ptiotlb; /* IOTLB for page table */
+
+ GHashTable *swvtbus_as_by_busptr;
+ /* list of registered notifiers */
+ QLIST_HEAD(, SWVTAddressSpace) swvt_as_with_notifiers;
+
+ PCIBus *pci_bus;
+ QemuMutex iommu_lock;
+};
+
+struct SW64IOMMUClass {
+ SysBusDeviceClass parent;
+ DeviceRealize realize;
+};
+
+#define TYPE_SW64_IOMMU "sw64-iommu"
+#define SW64_IOMMU(obj) \
+ OBJECT_CHECK(SW64IOMMUState, (obj), TYPE_SW64_IOMMU)
+#define SW64_IOMMU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SW64IOMMUClass, (klass), TYPE_SW64_IOMMU)
+#define SW64_IOMMU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SW64IOMMUClass, (obj), TYPE_SW64_IOMMU)
+extern void sw64_vt_iommu_init(PCIBus *b);
+extern void swvt_address_space_invalidate_iova(SW64IOMMUState *s, unsigned long val);
+extern void swvt_address_space_unmap_iova(SW64IOMMUState *s, unsigned long val);
+extern void swvt_address_space_map_iova(SW64IOMMUState *s, unsigned long val);
+extern SWVTAddressSpace *iommu_find_add_as(SW64IOMMUState *s, PCIBus *bus, int devfn);
+extern MemTxResult msi_write(void *opaque, hwaddr addr, uint64_t value, unsigned size,
+ MemTxAttrs attrs);
+#endif
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 112a29910b..6141122308 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -85,6 +85,8 @@
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
+#elif defined(__sw_64__)
+#define smp_read_barrier_depends() asm volatile("memb":::"memory")
#else
#define smp_read_barrier_depends() barrier()
#endif
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index d263fad9a4..e6d442abee 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -1007,6 +1007,16 @@ static inline int64_t cpu_get_host_ticks(void)
return cur - ofs;
}
+#elif defined(__sw_64__)
+
+static inline int64_t cpu_get_host_ticks(void)
+{
+ uint64_t cc;
+
+ asm volatile("rtc %0" : "=r"(cc));
+ return cc;
+}
+
#else
/* The host CPU doesn't have an easily accessible cycle counter.
Just return a monotonically increasing value. This will be
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 70c579560a..1cf27baa7c 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -24,6 +24,7 @@ enum {
QEMU_ARCH_RX = (1 << 20),
QEMU_ARCH_AVR = (1 << 21),
QEMU_ARCH_HEXAGON = (1 << 22),
+ QEMU_ARCH_SW64 = (1 << 23),
};
extern const uint32_t arch_type;
diff --git a/linux-headers/asm-sw64/kvm.h b/linux-headers/asm-sw64/kvm.h
new file mode 100644
index 0000000000..b0ce2ca346
--- /dev/null
+++ b/linux-headers/asm-sw64/kvm.h
@@ -0,0 +1,122 @@
+#ifndef __LINUX_KVM_SW64_H
+#define __LINUX_KVM_SW64_H
+
+#include <linux/types.h>
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+
+ unsigned long r27;
+ unsigned long r28;
+ unsigned long __padding0;
+ unsigned long fpcr;
+
+ unsigned long fp[124];
+ /* These are saved by hmcode: */
+ unsigned long ps;
+ unsigned long pc;
+ unsigned long gp;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+};
+
+struct vcpucb {
+ unsigned long go_flag;
+ unsigned long pcbb;
+ unsigned long ksp;
+ unsigned long usp;
+ unsigned long kgp;
+ unsigned long ent_arith;
+ unsigned long ent_if;
+ unsigned long ent_int;
+ unsigned long ent_mm;
+ unsigned long ent_sys;
+ unsigned long ent_una;
+ unsigned long stack_pc;
+ unsigned long new_a0;
+ unsigned long new_a1;
+ unsigned long new_a2;
+ unsigned long whami;
+ unsigned long csr_save;
+ unsigned long wakeup_magic;
+ unsigned long host_vcpucb;
+ unsigned long upcr;
+ unsigned long vpcr;
+ unsigned long dtb_pcr;
+ unsigned long guest_ksp;
+ unsigned long guest_usp;
+ unsigned long vcpu_irq_disabled;
+ unsigned long vcpu_irq;
+ unsigned long ptbr;
+ unsigned long int_stat0;
+ unsigned long int_stat1;
+ unsigned long int_stat2;
+ unsigned long int_stat3;
+ unsigned long reset_entry;
+ unsigned long pvcpu;
+ unsigned long exit_reason;
+ unsigned long ipaddr;
+ unsigned long vcpu_irq_vector;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+};
+
+/*
+ * KVM SW_64 specific structures and definitions
+ */
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba)
+#define KVM_SW64_USE_SLAVE _IO(KVMIO, 0xbb)
+#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc)
+#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd)
+
+#endif /* __LINUX_KVM_SW64_H */
diff --git a/linux-headers/asm-sw64/unistd.h b/linux-headers/asm-sw64/unistd.h
new file mode 100644
index 0000000000..affe297e73
--- /dev/null
+++ b/linux-headers/asm-sw64/unistd.h
@@ -0,0 +1,380 @@
+#ifndef _UAPI_ASM_SW64_UNISTD_64_H
+#define _UAPI_ASM_SW64_UNISTD_64_H
+
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_close 6
+#define __NR_osf_wait4 7
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_chdir 12
+#define __NR_fchdir 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+#define __NR_brk 17
+#define __NR_lseek 19
+#define __NR_getxpid 20
+#define __NR_osf_mount 21
+#define __NR_umount2 22
+#define __NR_setuid 23
+#define __NR_getxuid 24
+#define __NR_ptrace 26
+#define __NR_access 33
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_setpgid 39
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_osf_set_program_attributes 43
+#define __NR_open 45
+#define __NR_getxgid 47
+#define __NR_osf_sigprocmask 48
+#define __NR_acct 51
+#define __NR_sigpending 52
+#define __NR_ioctl 54
+#define __NR_symlink 57
+#define __NR_readlink 58
+#define __NR_execve 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_getpgrp 63
+#define __NR_getpagesize 64
+#define __NR_vfork 66
+#define __NR_stat 67
+#define __NR_lstat 68
+#define __NR_mmap 71
+#define __NR_munmap 73
+#define __NR_mprotect 74
+#define __NR_madvise 75
+#define __NR_vhangup 76
+#define __NR_getgroups 79
+#define __NR_setgroups 80
+#define __NR_setpgrp 82
+#define __NR_osf_setitimer 83
+#define __NR_osf_getitimer 86
+#define __NR_gethostname 87
+#define __NR_sethostname 88
+#define __NR_getdtablesize 89
+#define __NR_dup2 90
+#define __NR_fstat 91
+#define __NR_fcntl 92
+#define __NR_osf_select 93
+#define __NR_poll 94
+#define __NR_fsync 95
+#define __NR_setpriority 96
+#define __NR_socket 97
+#define __NR_connect 98
+#define __NR_accept 99
+#define __NR_getpriority 100
+#define __NR_send 101
+#define __NR_recv 102
+#define __NR_sigreturn 103
+#define __NR_bind 104
+#define __NR_setsockopt 105
+#define __NR_listen 106
+#define __NR_sigsuspend 111
+#define __NR_osf_sigstack 112
+#define __NR_recvmsg 113
+#define __NR_sendmsg 114
+#define __NR_osf_gettimeofday 116
+#define __NR_osf_getrusage 117
+#define __NR_getsockopt 118
+#define __NR_socketcall 119
+#define __NR_readv 120
+#define __NR_writev 121
+#define __NR_osf_settimeofday 122
+#define __NR_fchown 123
+#define __NR_fchmod 124
+#define __NR_recvfrom 125
+#define __NR_setreuid 126
+#define __NR_setregid 127
+#define __NR_rename 128
+#define __NR_truncate 129
+#define __NR_ftruncate 130
+#define __NR_flock 131
+#define __NR_setgid 132
+#define __NR_sendto 133
+#define __NR_shutdown 134
+#define __NR_socketpair 135
+#define __NR_mkdir 136
+#define __NR_rmdir 137
+#define __NR_osf_utimes 138
+#define __NR_getpeername 141
+#define __NR_getrlimit 144
+#define __NR_setrlimit 145
+#define __NR_setsid 147
+#define __NR_quotactl 148
+#define __NR_getsockname 150
+#define __NR_sigaction 156
+#define __NR_osf_getdirentries 159
+#define __NR_osf_statfs 160
+#define __NR_osf_fstatfs 161
+#define __NR_osf_getdomainname 165
+#define __NR_setdomainname 166
+#define __NR_bpf 170
+#define __NR_userfaultfd 171
+#define __NR_membarrier 172
+#define __NR_mlock2 173
+#define __NR_getpid 174
+#define __NR_getppid 175
+#define __NR_getuid 176
+#define __NR_geteuid 177
+#define __NR_getgid 178
+#define __NR_getegid 179
+#define __NR_osf_swapon 199
+#define __NR_msgctl 200
+#define __NR_msgget 201
+#define __NR_msgrcv 202
+#define __NR_msgsnd 203
+#define __NR_semctl 204
+#define __NR_semget 205
+#define __NR_semop 206
+#define __NR_osf_utsname 207
+#define __NR_lchown 208
+#define __NR_shmat 209
+#define __NR_shmctl 210
+#define __NR_shmdt 211
+#define __NR_shmget 212
+#define __NR_msync 217
+#define __NR_osf_stat 224
+#define __NR_osf_lstat 225
+#define __NR_osf_fstat 226
+#define __NR_osf_statfs64 227
+#define __NR_osf_fstatfs64 228
+#define __NR_statfs64 229
+#define __NR_fstatfs64 230
+#define __NR_getpgid 233
+#define __NR_getsid 234
+#define __NR_sigaltstack 235
+#define __NR_osf_sysinfo 241
+#define __NR_osf_proplist_syscall 244
+#define __NR_osf_usleep_thread 251
+#define __NR_sysfs 254
+#define __NR_osf_getsysinfo 256
+#define __NR_osf_setsysinfo 257
+#define __NR_bdflush 300
+#define __NR_sethae 301
+#define __NR_mount 302
+#define __NR_old_adjtimex 303
+#define __NR_swapoff 304
+#define __NR_getdents 305
+#define __NR_create_module 306
+#define __NR_init_module 307
+#define __NR_delete_module 308
+#define __NR_get_kernel_syms 309
+#define __NR_syslog 310
+#define __NR_reboot 311
+#define __NR_clone 312
+#define __NR_uselib 313
+#define __NR_mlock 314
+#define __NR_munlock 315
+#define __NR_mlockall 316
+#define __NR_munlockall 317
+#define __NR_sysinfo 318
+#define __NR__sysctl 319
+#define __NR_oldumount 321
+#define __NR_swapon 322
+#define __NR_times 323
+#define __NR_personality 324
+#define __NR_setfsuid 325
+#define __NR_setfsgid 326
+#define __NR_ustat 327
+#define __NR_statfs 328
+#define __NR_fstatfs 329
+#define __NR_sched_setparam 330
+#define __NR_sched_getparam 331
+#define __NR_sched_setscheduler 332
+#define __NR_sched_getscheduler 333
+#define __NR_sched_yield 334
+#define __NR_sched_get_priority_max 335
+#define __NR_sched_get_priority_min 336
+#define __NR_sched_rr_get_interval 337
+#define __NR_afs_syscall 338
+#define __NR_uname 339
+#define __NR_nanosleep 340
+#define __NR_mremap 341
+#define __NR_nfsservctl 342
+#define __NR_setresuid 343
+#define __NR_getresuid 344
+#define __NR_pciconfig_read 345
+#define __NR_pciconfig_write 346
+#define __NR_query_module 347
+#define __NR_prctl 348
+#define __NR_pread64 349
+#define __NR_pwrite64 350
+#define __NR_rt_sigreturn 351
+#define __NR_rt_sigaction 352
+#define __NR_rt_sigprocmask 353
+#define __NR_rt_sigpending 354
+#define __NR_rt_sigtimedwait 355
+#define __NR_rt_sigqueueinfo 356
+#define __NR_rt_sigsuspend 357
+#define __NR_select 358
+#define __NR_gettimeofday 359
+#define __NR_settimeofday 360
+#define __NR_getitimer 361
+#define __NR_setitimer 362
+#define __NR_utimes 363
+#define __NR_getrusage 364
+#define __NR_wait4 365
+#define __NR_adjtimex 366
+#define __NR_getcwd 367
+#define __NR_capget 368
+#define __NR_capset 369
+#define __NR_sendfile 370
+#define __NR_setresgid 371
+#define __NR_getresgid 372
+#define __NR_dipc 373
+#define __NR_pivot_root 374
+#define __NR_mincore 375
+#define __NR_pciconfig_iobase 376
+#define __NR_getdents64 377
+#define __NR_gettid 378
+#define __NR_readahead 379
+#define __NR_tkill 381
+#define __NR_setxattr 382
+#define __NR_lsetxattr 383
+#define __NR_fsetxattr 384
+#define __NR_getxattr 385
+#define __NR_lgetxattr 386
+#define __NR_fgetxattr 387
+#define __NR_listxattr 388
+#define __NR_llistxattr 389
+#define __NR_flistxattr 390
+#define __NR_removexattr 391
+#define __NR_lremovexattr 392
+#define __NR_fremovexattr 393
+#define __NR_futex 394
+#define __NR_sched_setaffinity 395
+#define __NR_sched_getaffinity 396
+#define __NR_tuxcall 397
+#define __NR_io_setup 398
+#define __NR_io_destroy 399
+#define __NR_io_getevents 400
+#define __NR_io_submit 401
+#define __NR_io_cancel 402
+#define __NR_io_pgetevents 403
+#define __NR_rseq 404
+#define __NR_exit_group 405
+#define __NR_lookup_dcookie 406
+#define __NR_epoll_create 407
+#define __NR_epoll_ctl 408
+#define __NR_epoll_wait 409
+#define __NR_remap_file_pages 410
+#define __NR_set_tid_address 411
+#define __NR_restart_syscall 412
+#define __NR_fadvise64 413
+#define __NR_timer_create 414
+#define __NR_timer_settime 415
+#define __NR_timer_gettime 416
+#define __NR_timer_getoverrun 417
+#define __NR_timer_delete 418
+#define __NR_clock_settime 419
+#define __NR_clock_gettime 420
+#define __NR_clock_getres 421
+#define __NR_clock_nanosleep 422
+#define __NR_semtimedop 423
+#define __NR_tgkill 424
+#define __NR_stat64 425
+#define __NR_lstat64 426
+#define __NR_fstat64 427
+#define __NR_vserver 428
+#define __NR_mbind 429
+#define __NR_get_mempolicy 430
+#define __NR_set_mempolicy 431
+#define __NR_mq_open 432
+#define __NR_mq_unlink 433
+#define __NR_mq_timedsend 434
+#define __NR_mq_timedreceive 435
+#define __NR_mq_notify 436
+#define __NR_mq_getsetattr 437
+#define __NR_waitid 438
+#define __NR_add_key 439
+#define __NR_request_key 440
+#define __NR_keyctl 441
+#define __NR_ioprio_set 442
+#define __NR_ioprio_get 443
+#define __NR_inotify_init 444
+#define __NR_inotify_add_watch 445
+#define __NR_inotify_rm_watch 446
+#define __NR_fdatasync 447
+#define __NR_kexec_load 448
+#define __NR_migrate_pages 449
+#define __NR_openat 450
+#define __NR_mkdirat 451
+#define __NR_mknodat 452
+#define __NR_fchownat 453
+#define __NR_futimesat 454
+#define __NR_fstatat64 455
+#define __NR_unlinkat 456
+#define __NR_renameat 457
+#define __NR_linkat 458
+#define __NR_symlinkat 459
+#define __NR_readlinkat 460
+#define __NR_fchmodat 461
+#define __NR_faccessat 462
+#define __NR_pselect6 463
+#define __NR_ppoll 464
+#define __NR_unshare 465
+#define __NR_set_robust_list 466
+#define __NR_get_robust_list 467
+#define __NR_splice 468
+#define __NR_sync_file_range 469
+#define __NR_tee 470
+#define __NR_vmsplice 471
+#define __NR_move_pages 472
+#define __NR_getcpu 473
+#define __NR_epoll_pwait 474
+#define __NR_utimensat 475
+#define __NR_signalfd 476
+#define __NR_timerfd 477
+#define __NR_eventfd 478
+#define __NR_recvmmsg 479
+#define __NR_fallocate 480
+#define __NR_timerfd_create 481
+#define __NR_timerfd_settime 482
+#define __NR_timerfd_gettime 483
+#define __NR_signalfd4 484
+#define __NR_eventfd2 485
+#define __NR_epoll_create1 486
+#define __NR_dup3 487
+#define __NR_pipe2 488
+#define __NR_inotify_init1 489
+#define __NR_preadv 490
+#define __NR_pwritev 491
+#define __NR_rt_tgsigqueueinfo 492
+#define __NR_perf_event_open 493
+#define __NR_fanotify_init 494
+#define __NR_fanotify_mark 495
+#define __NR_prlimit64 496
+#define __NR_name_to_handle_at 497
+#define __NR_open_by_handle_at 498
+#define __NR_clock_adjtime 499
+#define __NR_syncfs 500
+#define __NR_setns 501
+#define __NR_accept4 502
+#define __NR_sendmmsg 503
+#define __NR_process_vm_readv 504
+#define __NR_process_vm_writev 505
+#define __NR_kcmp 506
+#define __NR_finit_module 507
+#define __NR_sched_setattr 508
+#define __NR_sched_getattr 509
+#define __NR_renameat2 510
+#define __NR_getrandom 511
+#define __NR_memfd_create 512
+#define __NR_execveat 513
+#define __NR_seccomp 514
+#define __NR_copy_file_range 515
+#define __NR_preadv2 516
+#define __NR_pwritev2 517
+#define __NR_statx 518
+
+#ifdef __KERNEL__
+#define __NR_syscalls 519
+#endif
+
+#endif /* _UAPI_ASM_SW64_UNISTD_64_H */
diff --git a/linux-user/meson.build b/linux-user/meson.build
index bf62c13e37..4f4196ed13 100644
--- a/linux-user/meson.build
+++ b/linux-user/meson.build
@@ -37,5 +37,6 @@ subdir('ppc')
subdir('s390x')
subdir('sh4')
subdir('sparc')
+subdir('sw64')
subdir('x86_64')
subdir('xtensa')
diff --git a/linux-user/sw64/cpu_loop.c b/linux-user/sw64/cpu_loop.c
new file mode 100644
index 0000000000..3f2fde0fba
--- /dev/null
+++ b/linux-user/sw64/cpu_loop.c
@@ -0,0 +1,108 @@
+/*
+ * qemu user cpu loop
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "cpu_loop-common.h"
+
+void cpu_loop(CPUSW64State *env)
+{
+ CPUState *cs = CPU(sw64_env_get_cpu(env));
+ int trapnr;
+ target_siginfo_t info;
+ abi_long sysret;
+
+ while (1) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+
+ switch (trapnr) {
+ case EXCP_OPCDEC:
+ cpu_abort(cs, "ILLEGAL SW64 insn at line %d!", __LINE__);
+ case EXCP_CALL_SYS:
+ switch (env->error_code) {
+ case 0x83:
+ /* CALLSYS */
+ trapnr = env->ir[IDX_V0];
+ sysret = do_syscall(env, trapnr,
+ env->ir[IDX_A0], env->ir[IDX_A1],
+ env->ir[IDX_A2], env->ir[IDX_A3],
+ env->ir[IDX_A4], env->ir[IDX_A5],
+ 0, 0);
+ if (sysret == -TARGET_ERESTARTSYS) {
+ env->pc -= 4;
+ break;
+ }
+ if (sysret == -TARGET_QEMU_ESIGRETURN) {
+ break;
+ }
+ /* Syscall writes 0 to V0 to bypass error check, similar
+ to how this is handled internal to Linux kernel.
+ (Ab)use trapnr temporarily as boolean indicating error. */
+ trapnr = (env->ir[IDX_V0] != 0 && sysret < 0);
+ env->ir[IDX_V0] = (trapnr ? -sysret : sysret);
+ env->ir[IDX_A3] = trapnr;
+ break;
+ default:
+ printf("UNDO sys_call %lx\n", env->error_code);
+ exit(-1);
+ }
+ break;
+ case EXCP_MMFAULT:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
+ ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
+ info._sifields._sigfault._addr = env->trap_arg0;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_ARITH:
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_FLTINV;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ default:
+ cpu_abort(cs, "UNDO");
+ }
+ process_pending_signals (env);
+
+ /* Most of the traps imply a transition through HMcode, which
+ implies an REI instruction has been executed. Which means
+ that RX and LOCK_ADDR should be cleared. But there are a
+ few exceptions for traps internal to QEMU. */
+ }
+}
+
+void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+{
+ int i;
+
+ for(i = 0; i < 28; i++) {
+ env->ir[i] = ((abi_ulong *)regs)[i];
+ }
+ env->ir[IDX_SP] = regs->usp;
+ env->pc = regs->pc;
+}
diff --git a/linux-user/sw64/signal.c b/linux-user/sw64/signal.c
new file mode 100644
index 0000000000..5822e808d3
--- /dev/null
+++ b/linux-user/sw64/signal.c
@@ -0,0 +1,273 @@
+/*
+ * Emulation of Linux signals
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "signal-common.h"
+#include "linux-user/trace.h"
+
+struct target_sigcontext {
+ abi_long sc_onstack;
+ abi_long sc_mask;
+ abi_long sc_pc;
+ abi_long sc_ps;
+ abi_long sc_regs[32];
+ abi_long sc_ownedfp;
+ abi_long sc_fpregs[32];
+ abi_ulong sc_fpcr;
+ abi_ulong sc_fp_control;
+ abi_ulong sc_reserved1;
+ abi_ulong sc_reserved2;
+ abi_ulong sc_ssize;
+ abi_ulong sc_sbase;
+ abi_ulong sc_traparg_a0;
+ abi_ulong sc_traparg_a1;
+ abi_ulong sc_traparg_a2;
+ abi_ulong sc_fp_trap_pc;
+ abi_ulong sc_fp_trigger_sum;
+ abi_ulong sc_fp_trigger_inst;
+};
+
+struct target_ucontext {
+ abi_ulong tuc_flags;
+ abi_ulong tuc_link;
+ abi_ulong tuc_osf_sigmask;
+ target_stack_t tuc_stack;
+ struct target_sigcontext tuc_mcontext;
+ target_sigset_t tuc_sigmask;
+};
+
+struct target_sigframe {
+ struct target_sigcontext sc;
+ unsigned int retcode[3];
+};
+
+struct target_rt_sigframe {
+ target_siginfo_t info;
+ struct target_ucontext uc;
+ unsigned int retcode[3];
+};
+
+#define INSN_MOV_R30_R16 0x47fe0410
+#define INSN_LDI_R0 0x201f0000
+#define INSN_CALLSYS 0x00000083
+
+static void setup_sigcontext(struct target_sigcontext *sc, CPUSW64State *env,
+ abi_ulong frame_addr, target_sigset_t *set)
+{
+ int i;
+
+ __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
+ __put_user(set->sig[0], &sc->sc_mask);
+ __put_user(env->pc, &sc->sc_pc);
+ __put_user(8, &sc->sc_ps);
+
+ for (i = 0; i < 31; ++i) {
+ __put_user(env->ir[i], &sc->sc_regs[i]);
+ }
+ __put_user(0, &sc->sc_regs[31]);
+
+ for (i = 0; i < 31; ++i) {
+ __put_user(env->fr[i], &sc->sc_fpregs[i]);
+ }
+ __put_user(0, &sc->sc_fpregs[31]);
+ __put_user(cpu_sw64_load_fpcr(env), &sc->sc_fpcr);
+
+ __put_user(0, &sc->sc_traparg_a0); /* FIXME */
+ __put_user(0, &sc->sc_traparg_a1); /* FIXME */
+ __put_user(0, &sc->sc_traparg_a2); /* FIXME */
+}
+
+static void restore_sigcontext(CPUSW64State *env,
+ struct target_sigcontext *sc)
+{
+ uint64_t fpcr;
+ int i;
+
+ __get_user(env->pc, &sc->sc_pc);
+
+ for (i = 0; i < 31; ++i) {
+ __get_user(env->ir[i], &sc->sc_regs[i]);
+ }
+ for (i = 0; i < 31; ++i) {
+ __get_user(env->fr[i], &sc->sc_fpregs[i]);
+ }
+
+ __get_user(fpcr, &sc->sc_fpcr);
+ cpu_sw64_store_fpcr(env, fpcr);
+}
+
+static inline abi_ulong get_sigframe(struct target_sigaction *sa,
+ CPUSW64State *env,
+ unsigned long framesize)
+{
+ abi_ulong sp;
+
+ sp = target_sigsp(get_sp_from_cpustate(env), sa);
+
+ return (sp - framesize) & -32;
+}
+
+void setup_frame(int sig, struct target_sigaction *ka,
+ target_sigset_t *set, CPUSW64State *env)
+{
+ abi_ulong frame_addr, r26;
+ struct target_sigframe *frame;
+ int err = 0;
+
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ trace_user_setup_frame(env, frame_addr);
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ setup_sigcontext(&frame->sc, env, frame_addr, set);
+
+ if (ka->sa_restorer) {
+ r26 = ka->sa_restorer;
+ } else {
+ __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
+ __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
+ &frame->retcode[1]);
+ __put_user(INSN_CALLSYS, &frame->retcode[2]);
+ /* imb() */
+ r26 = frame_addr + offsetof(struct target_sigframe, retcode);
+ }
+
+ unlock_user_struct(frame, frame_addr, 1);
+
+ if (err) {
+give_sigsegv:
+ force_sigsegv(sig);
+ return;
+ }
+
+ env->ir[IDX_RA] = r26;
+ env->ir[IDX_PV] = env->pc = ka->_sa_handler;
+ env->ir[IDX_A0] = sig;
+ env->ir[IDX_A1] = 0;
+ env->ir[IDX_A2] = frame_addr + offsetof(struct target_sigframe, sc);
+ env->ir[IDX_SP] = frame_addr;
+}
+
+void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPUSW64State *env)
+{
+ abi_ulong frame_addr, r26;
+ struct target_rt_sigframe *frame;
+ int i, err = 0;
+
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ trace_user_setup_rt_frame(env, frame_addr);
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ tswap_siginfo(&frame->info, info);
+
+ __put_user(0, &frame->uc.tuc_flags);
+ __put_user(0, &frame->uc.tuc_link);
+ __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
+
+ target_save_altstack(&frame->uc.tuc_stack, env);
+
+ setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
+ for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
+ __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
+ }
+
+ if (ka->sa_restorer) {
+ r26 = ka->sa_restorer;
+ } else {
+ __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
+ __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
+ &frame->retcode[1]);
+ __put_user(INSN_CALLSYS, &frame->retcode[2]);
+ r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode);
+ }
+
+ if (err) {
+give_sigsegv:
+ force_sigsegv(sig);
+ return;
+ }
+
+ env->ir[IDX_RA] = r26;
+ env->ir[IDX_PV] = env->pc = ka->_sa_handler;
+ env->ir[IDX_A0] = sig;
+ env->ir[IDX_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
+ env->ir[IDX_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
+ env->ir[IDX_SP] = frame_addr;
+}
+
+long do_sigreturn(CPUSW64State *env)
+{
+ struct target_sigcontext *sc;
+ abi_ulong sc_addr = env->ir[IDX_A0];
+ target_sigset_t target_set;
+ sigset_t set;
+
+ if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
+ goto badframe;
+ }
+
+ target_sigemptyset(&target_set);
+ __get_user(target_set.sig[0], &sc->sc_mask);
+
+ target_to_host_sigset_internal(&set, &target_set);
+ set_sigmask(&set);
+
+ restore_sigcontext(env, sc);
+ unlock_user_struct(sc, sc_addr, 0);
+ return -TARGET_QEMU_ESIGRETURN;
+
+badframe:
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
+}
+
+long do_rt_sigreturn(CPUSW64State *env)
+{
+ abi_ulong frame_addr = env->ir[IDX_A0];
+ struct target_rt_sigframe *frame = NULL;
+ sigset_t set;
+
+ trace_user_do_rt_sigreturn(env, frame_addr);
+ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+ goto badframe;
+ }
+ target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
+ set_sigmask(&set);
+
+ restore_sigcontext(env, &frame->uc.tuc_mcontext);
+ if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
+ uc.tuc_stack),
+ 0, env->ir[IDX_SP]) == -EFAULT) {
+ goto badframe;
+ }
+
+ unlock_user_struct(frame, frame_addr, 0);
+ return -TARGET_QEMU_ESIGRETURN;
+
+
+badframe:
+ unlock_user_struct(frame, frame_addr, 0);
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
+}
diff --git a/linux-user/sw64/sockbits.h b/linux-user/sw64/sockbits.h
new file mode 100644
index 0000000000..0e4c8f012d
--- /dev/null
+++ b/linux-user/sw64/sockbits.h
@@ -0,0 +1 @@
+#include "../generic/sockbits.h"
diff --git a/linux-user/sw64/syscall_nr.h b/linux-user/sw64/syscall_nr.h
new file mode 100644
index 0000000000..91737af322
--- /dev/null
+++ b/linux-user/sw64/syscall_nr.h
@@ -0,0 +1,471 @@
+/*
+ * This file contains the system call numbers.
+ */
+#define TARGET_NR_osf_syscall 0 /* not implemented */
+#define TARGET_NR_exit 1
+#define TARGET_NR_fork 2
+#define TARGET_NR_read 3
+#define TARGET_NR_write 4
+#define TARGET_NR_osf_old_open 5 /* not implemented */
+#define TARGET_NR_close 6
+#define TARGET_NR_osf_wait4 7
+#define TARGET_NR_osf_old_creat 8 /* not implemented */
+#define TARGET_NR_link 9
+#define TARGET_NR_unlink 10
+#define TARGET_NR_osf_execve 11 /* not implemented */
+#define TARGET_NR_chdir 12
+#define TARGET_NR_fchdir 13
+#define TARGET_NR_mknod 14
+#define TARGET_NR_chmod 15
+#define TARGET_NR_chown 16
+#define TARGET_NR_brk 17
+#define TARGET_NR_osf_getfsstat 18 /* not implemented */
+#define TARGET_NR_lseek 19
+#define TARGET_NR_getxpid 20
+#define TARGET_NR_osf_mount 21
+#define TARGET_NR_umount 22
+#define TARGET_NR_setuid 23
+#define TARGET_NR_getxuid 24
+#define TARGET_NR_exec_with_loader 25 /* not implemented */
+#define TARGET_NR_ptrace 26
+#define TARGET_NR_osf_nrecvmsg 27 /* not implemented */
+#define TARGET_NR_osf_nsendmsg 28 /* not implemented */
+#define TARGET_NR_osf_nrecvfrom 29 /* not implemented */
+#define TARGET_NR_osf_naccept 30 /* not implemented */
+#define TARGET_NR_osf_ngetpeername 31 /* not implemented */
+#define TARGET_NR_osf_ngetsockname 32 /* not implemented */
+#define TARGET_NR_access 33
+#define TARGET_NR_osf_chflags 34 /* not implemented */
+#define TARGET_NR_osf_fchflags 35 /* not implemented */
+#define TARGET_NR_sync 36
+#define TARGET_NR_kill 37
+#define TARGET_NR_osf_old_stat 38 /* not implemented */
+#define TARGET_NR_setpgid 39
+#define TARGET_NR_osf_old_lstat 40 /* not implemented */
+#define TARGET_NR_dup 41
+#define TARGET_NR_pipe 42
+#define TARGET_NR_osf_set_program_attributes 43
+#define TARGET_NR_osf_profil 44 /* not implemented */
+#define TARGET_NR_open 45
+#define TARGET_NR_osf_old_sigaction 46 /* not implemented */
+#define TARGET_NR_getxgid 47
+#define TARGET_NR_osf_sigprocmask 48
+#define TARGET_NR_osf_getlogin 49 /* not implemented */
+#define TARGET_NR_osf_setlogin 50 /* not implemented */
+#define TARGET_NR_acct 51
+#define TARGET_NR_sigpending 52
+
+#define TARGET_NR_ioctl 54
+#define TARGET_NR_osf_reboot 55 /* not implemented */
+#define TARGET_NR_osf_revoke 56 /* not implemented */
+#define TARGET_NR_symlink 57
+#define TARGET_NR_readlink 58
+#define TARGET_NR_execve 59
+#define TARGET_NR_umask 60
+#define TARGET_NR_chroot 61
+#define TARGET_NR_osf_old_fstat 62 /* not implemented */
+#define TARGET_NR_getpgrp 63
+#define TARGET_NR_getpagesize 64
+#define TARGET_NR_osf_mremap 65 /* not implemented */
+#define TARGET_NR_vfork 66
+#define TARGET_NR_stat 67
+#define TARGET_NR_lstat 68
+#define TARGET_NR_osf_sbrk 69 /* not implemented */
+#define TARGET_NR_osf_sstk 70 /* not implemented */
+#define TARGET_NR_mmap 71 /* OSF/1 mmap is superset of Linux */
+#define TARGET_NR_osf_old_vadvise 72 /* not implemented */
+#define TARGET_NR_munmap 73
+#define TARGET_NR_mprotect 74
+#define TARGET_NR_madvise 75
+#define TARGET_NR_vhangup 76
+#define TARGET_NR_osf_kmodcall 77 /* not implemented */
+#define TARGET_NR_osf_mincore 78 /* not implemented */
+#define TARGET_NR_getgroups 79
+#define TARGET_NR_setgroups 80
+#define TARGET_NR_osf_old_getpgrp 81 /* not implemented */
+#define TARGET_NR_setpgrp 82 /* BSD alias for setpgid */
+#define TARGET_NR_osf_setitimer 83
+#define TARGET_NR_osf_old_wait 84 /* not implemented */
+#define TARGET_NR_osf_table 85 /* not implemented */
+#define TARGET_NR_osf_getitimer 86
+#define TARGET_NR_gethostname 87
+#define TARGET_NR_sethostname 88
+#define TARGET_NR_getdtablesize 89
+#define TARGET_NR_dup2 90
+#define TARGET_NR_fstat 91
+#define TARGET_NR_fcntl 92
+#define TARGET_NR_osf_select 93
+#define TARGET_NR_poll 94
+#define TARGET_NR_fsync 95
+#define TARGET_NR_setpriority 96
+#define TARGET_NR_socket 97
+#define TARGET_NR_connect 98
+#define TARGET_NR_accept 99
+#define TARGET_NR_getpriority 100
+#define TARGET_NR_send 101
+#define TARGET_NR_recv 102
+#define TARGET_NR_sigreturn 103
+#define TARGET_NR_bind 104
+#define TARGET_NR_setsockopt 105
+#define TARGET_NR_listen 106
+#define TARGET_NR_osf_plock 107 /* not implemented */
+#define TARGET_NR_osf_old_sigvec 108 /* not implemented */
+#define TARGET_NR_osf_old_sigblock 109 /* not implemented */
+#define TARGET_NR_osf_old_sigsetmask 110 /* not implemented */
+#define TARGET_NR_sigsuspend 111
+#define TARGET_NR_osf_sigstack 112
+#define TARGET_NR_recvmsg 113
+#define TARGET_NR_sendmsg 114
+#define TARGET_NR_osf_old_vtrace 115 /* not implemented */
+#define TARGET_NR_osf_gettimeofday 116
+#define TARGET_NR_osf_getrusage 117
+#define TARGET_NR_getsockopt 118
+
+#define TARGET_NR_readv 120
+#define TARGET_NR_writev 121
+#define TARGET_NR_osf_settimeofday 122
+#define TARGET_NR_fchown 123
+#define TARGET_NR_fchmod 124
+#define TARGET_NR_recvfrom 125
+#define TARGET_NR_setreuid 126
+#define TARGET_NR_setregid 127
+#define TARGET_NR_rename 128
+#define TARGET_NR_truncate 129
+#define TARGET_NR_ftruncate 130
+#define TARGET_NR_flock 131
+#define TARGET_NR_setgid 132
+#define TARGET_NR_sendto 133
+#define TARGET_NR_shutdown 134
+#define TARGET_NR_socketpair 135
+#define TARGET_NR_mkdir 136
+#define TARGET_NR_rmdir 137
+#define TARGET_NR_osf_utimes 138
+#define TARGET_NR_osf_old_sigreturn 139 /* not implemented */
+#define TARGET_NR_osf_adjtime 140 /* not implemented */
+#define TARGET_NR_getpeername 141
+#define TARGET_NR_osf_gethostid 142 /* not implemented */
+#define TARGET_NR_osf_sethostid 143 /* not implemented */
+#define TARGET_NR_getrlimit 144
+#define TARGET_NR_setrlimit 145
+#define TARGET_NR_osf_old_killpg 146 /* not implemented */
+#define TARGET_NR_setsid 147
+#define TARGET_NR_quotactl 148
+#define TARGET_NR_osf_oldquota 149 /* not implemented */
+#define TARGET_NR_getsockname 150
+
+#define TARGET_NR_osf_pid_block 153 /* not implemented */
+#define TARGET_NR_osf_pid_unblock 154 /* not implemented */
+
+#define TARGET_NR_sigaction 156
+#define TARGET_NR_osf_sigwaitprim 157 /* not implemented */
+#define TARGET_NR_osf_nfssvc 158 /* not implemented */
+#define TARGET_NR_osf_getdirentries 159
+#define TARGET_NR_osf_statfs 160
+#define TARGET_NR_osf_fstatfs 161
+
+#define TARGET_NR_osf_asynch_daemon 163 /* not implemented */
+#define TARGET_NR_osf_getfh 164 /* not implemented */
+#define TARGET_NR_osf_getdomainname 165
+#define TARGET_NR_setdomainname 166
+
+#define TARGET_NR_osf_exportfs 169 /* not implemented */
+
+#define TARGET_NR_osf_alt_plock 181 /* not implemented */
+
+#define TARGET_NR_osf_getmnt 184 /* not implemented */
+
+#define TARGET_NR_osf_alt_sigpending 187 /* not implemented */
+#define TARGET_NR_osf_alt_setsid 188 /* not implemented */
+
+#define TARGET_NR_osf_swapon 199
+#define TARGET_NR_msgctl 200
+#define TARGET_NR_msgget 201
+#define TARGET_NR_msgrcv 202
+#define TARGET_NR_msgsnd 203
+#define TARGET_NR_semctl 204
+#define TARGET_NR_semget 205
+#define TARGET_NR_semop 206
+#define TARGET_NR_osf_utsname 207
+#define TARGET_NR_lchown 208
+#define TARGET_NR_osf_shmat 209
+#define TARGET_NR_shmctl 210
+#define TARGET_NR_shmdt 211
+#define TARGET_NR_shmget 212
+#define TARGET_NR_osf_mvalid 213 /* not implemented */
+#define TARGET_NR_osf_getaddressconf 214 /* not implemented */
+#define TARGET_NR_osf_msleep 215 /* not implemented */
+#define TARGET_NR_osf_mwakeup 216 /* not implemented */
+#define TARGET_NR_msync 217
+#define TARGET_NR_osf_signal 218 /* not implemented */
+#define TARGET_NR_osf_utc_gettime 219 /* not implemented */
+#define TARGET_NR_osf_utc_adjtime 220 /* not implemented */
+
+#define TARGET_NR_osf_security 222 /* not implemented */
+#define TARGET_NR_osf_kloadcall 223 /* not implemented */
+
+#define TARGET_NR_osf_stat 224
+#define TARGET_NR_osf_lstat 225
+#define TARGET_NR_osf_fstat 226
+#define TARGET_NR_osf_statfs64 227
+#define TARGET_NR_osf_fstatfs64 228
+
+#define TARGET_NR_getpgid 233
+#define TARGET_NR_getsid 234
+#define TARGET_NR_sigaltstack 235
+#define TARGET_NR_osf_waitid 236 /* not implemented */
+#define TARGET_NR_osf_priocntlset 237 /* not implemented */
+#define TARGET_NR_osf_sigsendset 238 /* not implemented */
+#define TARGET_NR_osf_set_speculative 239 /* not implemented */
+#define TARGET_NR_osf_msfs_syscall 240 /* not implemented */
+#define TARGET_NR_osf_sysinfo 241
+#define TARGET_NR_osf_uadmin 242 /* not implemented */
+#define TARGET_NR_osf_fuser 243 /* not implemented */
+#define TARGET_NR_osf_proplist_syscall 244
+#define TARGET_NR_osf_ntp_adjtime 245 /* not implemented */
+#define TARGET_NR_osf_ntp_gettime 246 /* not implemented */
+#define TARGET_NR_osf_pathconf 247 /* not implemented */
+#define TARGET_NR_osf_fpathconf 248 /* not implemented */
+
+#define TARGET_NR_osf_uswitch 250 /* not implemented */
+#define TARGET_NR_osf_usleep_thread 251
+#define TARGET_NR_osf_audcntl 252 /* not implemented */
+#define TARGET_NR_osf_audgen 253 /* not implemented */
+#define TARGET_NR_sysfs 254
+#define TARGET_NR_osf_subsys_info 255 /* not implemented */
+#define TARGET_NR_osf_getsysinfo 256
+#define TARGET_NR_osf_setsysinfo 257
+#define TARGET_NR_osf_afs_syscall 258 /* not implemented */
+#define TARGET_NR_osf_swapctl 259 /* not implemented */
+#define TARGET_NR_osf_memcntl 260 /* not implemented */
+#define TARGET_NR_osf_fdatasync 261 /* not implemented */
+
+/*
+ * Ignore legacy syscalls that we don't use.
+ */
+#define TARGET_IGNORE_alarm
+#define TARGET_IGNORE_creat
+#define TARGET_IGNORE_getegid
+#define TARGET_IGNORE_geteuid
+#define TARGET_IGNORE_getgid
+#define TARGET_IGNORE_getpid
+#define TARGET_IGNORE_getppid
+#define TARGET_IGNORE_getuid
+#define TARGET_IGNORE_pause
+#define TARGET_IGNORE_time
+#define TARGET_IGNORE_utime
+#define TARGET_IGNORE_umount2
+
+/*
+ * Linux-specific system calls begin at 300
+ */
+#define TARGET_NR_bdflush 300
+#define TARGET_NR_sethae 301
+#define TARGET_NR_mount 302
+#define TARGET_NR_old_adjtimex 303
+#define TARGET_NR_swapoff 304
+#define TARGET_NR_getdents 305
+#define TARGET_NR_create_module 306
+#define TARGET_NR_init_module 307
+#define TARGET_NR_delete_module 308
+#define TARGET_NR_get_kernel_syms 309
+#define TARGET_NR_syslog 310
+#define TARGET_NR_reboot 311
+#define TARGET_NR_clone 312
+#define TARGET_NR_uselib 313
+#define TARGET_NR_mlock 314
+#define TARGET_NR_munlock 315
+#define TARGET_NR_mlockall 316
+#define TARGET_NR_munlockall 317
+#define TARGET_NR_sysinfo 318
+#define TARGET_NR__sysctl 319
+/* 320 was sys_idle. */
+#define TARGET_NR_oldumount 321
+#define TARGET_NR_swapon 322
+#define TARGET_NR_times 323
+#define TARGET_NR_personality 324
+#define TARGET_NR_setfsuid 325
+#define TARGET_NR_setfsgid 326
+#define TARGET_NR_ustat 327
+#define TARGET_NR_statfs 328
+#define TARGET_NR_fstatfs 329
+#define TARGET_NR_sched_setparam 330
+#define TARGET_NR_sched_getparam 331
+#define TARGET_NR_sched_setscheduler 332
+#define TARGET_NR_sched_getscheduler 333
+#define TARGET_NR_sched_yield 334
+#define TARGET_NR_sched_get_priority_max 335
+#define TARGET_NR_sched_get_priority_min 336
+#define TARGET_NR_sched_rr_get_interval 337
+#define TARGET_NR_afs_syscall 338
+#define TARGET_NR_uname 339
+#define TARGET_NR_nanosleep 340
+#define TARGET_NR_mremap 341
+#define TARGET_NR_nfsservctl 342
+#define TARGET_NR_setresuid 343
+#define TARGET_NR_getresuid 344
+#define TARGET_NR_pciconfig_read 345
+#define TARGET_NR_pciconfig_write 346
+#define TARGET_NR_query_module 347
+#define TARGET_NR_prctl 348
+#define TARGET_NR_pread64 349
+#define TARGET_NR_pwrite64 350
+#define TARGET_NR_rt_sigreturn 351
+#define TARGET_NR_rt_sigaction 352
+#define TARGET_NR_rt_sigprocmask 353
+#define TARGET_NR_rt_sigpending 354
+#define TARGET_NR_rt_sigtimedwait 355
+#define TARGET_NR_rt_sigqueueinfo 356
+#define TARGET_NR_rt_sigsuspend 357
+#define TARGET_NR_select 358
+#define TARGET_NR_gettimeofday 359
+#define TARGET_NR_settimeofday 360
+#define TARGET_NR_getitimer 361
+#define TARGET_NR_setitimer 362
+#define TARGET_NR_utimes 363
+#define TARGET_NR_getrusage 364
+#define TARGET_NR_wait4 365
+#define TARGET_NR_adjtimex 366
+#define TARGET_NR_getcwd 367
+#define TARGET_NR_capget 368
+#define TARGET_NR_capset 369
+#define TARGET_NR_sendfile 370
+#define TARGET_NR_setresgid 371
+#define TARGET_NR_getresgid 372
+#define TARGET_NR_dipc 373
+#define TARGET_NR_pivot_root 374
+#define TARGET_NR_mincore 375
+#define TARGET_NR_pciconfig_iobase 376
+#define TARGET_NR_getdents64 377
+#define TARGET_NR_gettid 378
+#define TARGET_NR_readahead 379
+/* 380 is unused */
+#define TARGET_NR_tkill 381
+#define TARGET_NR_setxattr 382
+#define TARGET_NR_lsetxattr 383
+#define TARGET_NR_fsetxattr 384
+#define TARGET_NR_getxattr 385
+#define TARGET_NR_lgetxattr 386
+#define TARGET_NR_fgetxattr 387
+#define TARGET_NR_listxattr 388
+#define TARGET_NR_llistxattr 389
+#define TARGET_NR_flistxattr 390
+#define TARGET_NR_removexattr 391
+#define TARGET_NR_lremovexattr 392
+#define TARGET_NR_fremovexattr 393
+#define TARGET_NR_futex 394
+#define TARGET_NR_sched_setaffinity 395
+#define TARGET_NR_sched_getaffinity 396
+#define TARGET_NR_tuxcall 397
+#define TARGET_NR_io_setup 398
+#define TARGET_NR_io_destroy 399
+#define TARGET_NR_io_getevents 400
+#define TARGET_NR_io_submit 401
+#define TARGET_NR_io_cancel 402
+#define TARGET_NR_exit_group 405
+#define TARGET_NR_lookup_dcookie 406
+#define TARGET_NR_epoll_create 407
+#define TARGET_NR_epoll_ctl 408
+#define TARGET_NR_epoll_wait 409
+/* Feb 2007: These three sys_epoll defines shouldn't be here but culling
+ * them would break userspace apps ... we'll kill them off in 2010 :) */
+#define TARGET_NR_sys_epoll_create TARGET_NR_epoll_create
+#define TARGET_NR_sys_epoll_ctl TARGET_NR_epoll_ctl
+#define TARGET_NR_sys_epoll_wait TARGET_NR_epoll_wait
+#define TARGET_NR_remap_file_pages 410
+#define TARGET_NR_set_tid_address 411
+#define TARGET_NR_restart_syscall 412
+#define TARGET_NR_fadvise64 413
+#define TARGET_NR_timer_create 414
+#define TARGET_NR_timer_settime 415
+#define TARGET_NR_timer_gettime 416
+#define TARGET_NR_timer_getoverrun 417
+#define TARGET_NR_timer_delete 418
+#define TARGET_NR_clock_settime 419
+#define TARGET_NR_clock_gettime 420
+#define TARGET_NR_clock_getres 421
+#define TARGET_NR_clock_nanosleep 422
+#define TARGET_NR_semtimedop 423
+#define TARGET_NR_tgkill 424
+#define TARGET_NR_stat64 425
+#define TARGET_NR_lstat64 426
+#define TARGET_NR_fstat64 427
+#define TARGET_NR_vserver 428
+#define TARGET_NR_mbind 429
+#define TARGET_NR_get_mempolicy 430
+#define TARGET_NR_set_mempolicy 431
+#define TARGET_NR_mq_open 432
+#define TARGET_NR_mq_unlink 433
+#define TARGET_NR_mq_timedsend 434
+#define TARGET_NR_mq_timedreceive 435
+#define TARGET_NR_mq_notify 436
+#define TARGET_NR_mq_getsetattr 437
+#define TARGET_NR_waitid 438
+#define TARGET_NR_add_key 439
+#define TARGET_NR_request_key 440
+#define TARGET_NR_keyctl 441
+#define TARGET_NR_ioprio_set 442
+#define TARGET_NR_ioprio_get 443
+#define TARGET_NR_inotify_init 444
+#define TARGET_NR_inotify_add_watch 445
+#define TARGET_NR_inotify_rm_watch 446
+#define TARGET_NR_fdatasync 447
+#define TARGET_NR_kexec_load 448
+#define TARGET_NR_migrate_pages 449
+#define TARGET_NR_openat 450
+#define TARGET_NR_mkdirat 451
+#define TARGET_NR_mknodat 452
+#define TARGET_NR_fchownat 453
+#define TARGET_NR_futimesat 454
+#define TARGET_NR_fstatat64 455
+#define TARGET_NR_unlinkat 456
+#define TARGET_NR_renameat 457
+#define TARGET_NR_linkat 458
+#define TARGET_NR_symlinkat 459
+#define TARGET_NR_readlinkat 460
+#define TARGET_NR_fchmodat 461
+#define TARGET_NR_faccessat 462
+#define TARGET_NR_pselect6 463
+#define TARGET_NR_ppoll 464
+#define TARGET_NR_unshare 465
+#define TARGET_NR_set_robust_list 466
+#define TARGET_NR_get_robust_list 467
+#define TARGET_NR_splice 468
+#define TARGET_NR_sync_file_range 469
+#define TARGET_NR_tee 470
+#define TARGET_NR_vmsplice 471
+#define TARGET_NR_move_pages 472
+#define TARGET_NR_getcpu 473
+#define TARGET_NR_epoll_pwait 474
+#define TARGET_NR_utimensat 475
+#define TARGET_NR_signalfd 476
+#define TARGET_NR_timerfd 477
+#define TARGET_NR_eventfd 478
+#define TARGET_NR_recvmmsg 479
+#define TARGET_NR_fallocate 480
+#define TARGET_NR_timerfd_create 481
+#define TARGET_NR_timerfd_settime 482
+#define TARGET_NR_timerfd_gettime 483
+#define TARGET_NR_signalfd4 484
+#define TARGET_NR_eventfd2 485
+#define TARGET_NR_epoll_create1 486
+#define TARGET_NR_dup3 487
+#define TARGET_NR_pipe2 488
+#define TARGET_NR_inotify_init1 489
+#define TARGET_NR_preadv 490
+#define TARGET_NR_pwritev 491
+#define TARGET_NR_rt_tgsigqueueinfo 492
+#define TARGET_NR_perf_event_open 493
+#define TARGET_NR_fanotify_init 494
+#define TARGET_NR_fanotify_mark 495
+#define TARGET_NR_prlimit64 496
+#define TARGET_NR_name_to_handle_at 497
+#define TARGET_NR_open_by_handle_at 498
+#define TARGET_NR_clock_adjtime 499
+#define TARGET_NR_syncfs 500
+#define TARGET_NR_setns 501
+#define TARGET_NR_accept4 502
+#define TARGET_NR_sendmmsg 503
+#define TARGET_NR_process_vm_readv 504
+#define TARGET_NR_process_vm_writev 505
+#define TARGET_NR_sw_slave_rwperfmons 506
+#define TARGET_NR_sys_get_vmflags 507
diff --git a/linux-user/sw64/target_cpu.h b/linux-user/sw64/target_cpu.h
new file mode 100644
index 0000000000..1b87c8ba6d
--- /dev/null
+++ b/linux-user/sw64/target_cpu.h
@@ -0,0 +1,38 @@
+/*
+ * SW64 specific CPU ABI and functions for linux-user
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef SW64_TARGET_CPU_H
+#define SW64_TARGET_CPU_H
+
+static inline void cpu_clone_regs(CPUSW64State *env, target_ulong newsp)
+{
+    if (newsp) {
+        env->ir[IDX_SP] = newsp;   /* child runs on the caller-supplied stack, if any */
+    }
+    env->ir[IDX_V0] = 0;           /* child sees a 0 return value from clone/fork */
+    env->ir[IDX_A3] = 0;           /* a3 = 0 signals "no error" in this syscall ABI */
+}
+
+static inline void cpu_set_tls(CPUSW64State *env, target_ulong newtls)
+{
+    env->unique = newtls;          /* TLS pointer lives in the 'unique' register */
+}
+
+static inline abi_ulong get_sp_from_cpustate(CPUSW64State *state)
+{
+    return state->ir[IDX_SP];      /* current guest stack pointer */
+}
+#endif
diff --git a/linux-user/sw64/target_elf.h b/linux-user/sw64/target_elf.h
new file mode 100644
index 0000000000..be48b6dee3
--- /dev/null
+++ b/linux-user/sw64/target_elf.h
@@ -0,0 +1,14 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+
+#ifndef SW64_TARGET_ELF_H
+#define SW64_TARGET_ELF_H
+static inline const char *cpu_get_model(uint32_t eflags)
+{
+    return "any";   /* single CPU model: ELF e_flags do not select a variant */
+}
+#endif
diff --git a/linux-user/sw64/target_fcntl.h b/linux-user/sw64/target_fcntl.h
new file mode 100644
index 0000000000..9721e3de39
--- /dev/null
+++ b/linux-user/sw64/target_fcntl.h
@@ -0,0 +1,11 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+
+#ifndef SW64_TARGET_FCNTL_H
+#define SW64_TARGET_FCNTL_H
+#include "../generic/fcntl.h"
+#endif
diff --git a/linux-user/sw64/target_signal.h b/linux-user/sw64/target_signal.h
new file mode 100644
index 0000000000..6393a7542f
--- /dev/null
+++ b/linux-user/sw64/target_signal.h
@@ -0,0 +1,98 @@
+#ifndef SW64_TARGET_SIGNAL_H
+#define SW64_TARGET_SIGNAL_H
+
+#include "cpu.h"
+
+#define TARGET_SIGHUP 1
+#define TARGET_SIGINT 2
+#define TARGET_SIGQUIT 3
+#define TARGET_SIGILL 4
+#define TARGET_SIGTRAP 5
+#define TARGET_SIGABRT 6
+#define TARGET_SIGSTKFLT 7 /* actually SIGEMT */
+#define TARGET_SIGFPE 8
+#define TARGET_SIGKILL 9
+#define TARGET_SIGBUS 10
+#define TARGET_SIGSEGV 11
+#define TARGET_SIGSYS 12
+#define TARGET_SIGPIPE 13
+#define TARGET_SIGALRM 14
+#define TARGET_SIGTERM 15
+#define TARGET_SIGURG 16
+#define TARGET_SIGSTOP 17
+#define TARGET_SIGTSTP 18
+#define TARGET_SIGCONT 19
+#define TARGET_SIGCHLD 20
+#define TARGET_SIGTTIN 21
+#define TARGET_SIGTTOU 22
+#define TARGET_SIGIO 23
+#define TARGET_SIGXCPU 24
+#define TARGET_SIGXFSZ 25
+#define TARGET_SIGVTALRM 26
+#define TARGET_SIGPROF 27
+#define TARGET_SIGWINCH 28
+#define TARGET_SIGPWR 29 /* actually SIGINFO */
+#define TARGET_SIGUSR1 30
+#define TARGET_SIGUSR2 31
+#define TARGET_SIGRTMIN 32
+
+#define TARGET_SIG_BLOCK 1
+#define TARGET_SIG_UNBLOCK 2
+#define TARGET_SIG_SETMASK 3
+
+/* this struct defines a stack used during syscall handling */
+
+typedef struct target_sigaltstack {
+ abi_ulong ss_sp;
+ int32_t ss_flags;
+ int32_t dummy;
+ abi_ulong ss_size;
+} target_stack_t;
+
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_SA_ONSTACK 0x00000001
+#define TARGET_SA_RESTART 0x00000002
+#define TARGET_SA_NOCLDSTOP 0x00000004
+#define TARGET_SA_NODEFER 0x00000008
+#define TARGET_SA_RESETHAND 0x00000010
+#define TARGET_SA_NOCLDWAIT 0x00000020 /* not supported yet */
+#define TARGET_SA_SIGINFO 0x00000040
+
+#define TARGET_MINSIGSTKSZ 4096
+#define TARGET_SIGSTKSZ 16384
+
+/* From <asm/gentrap.h>. */
+#define TARGET_GEN_INTOVF -1 /* integer overflow */
+#define TARGET_GEN_INTDIV -2 /* integer division by zero */
+#define TARGET_GEN_FLTOVF -3 /* fp overflow */
+#define TARGET_GEN_FLTDIV -4 /* fp division by zero */
+#define TARGET_GEN_FLTUND -5 /* fp underflow */
+#define TARGET_GEN_FLTINV -6 /* invalid fp operand */
+#define TARGET_GEN_FLTINE -7 /* inexact fp operand */
+#define TARGET_GEN_DECOVF -8 /* decimal overflow (for COBOL??) */
+#define TARGET_GEN_DECDIV -9 /* decimal division by zero */
+#define TARGET_GEN_DECINV -10 /* invalid decimal operand */
+#define TARGET_GEN_ROPRAND -11 /* reserved operand */
+#define TARGET_GEN_ASSERTERR -12 /* assertion error */
+#define TARGET_GEN_NULPTRERR -13 /* null pointer error */
+#define TARGET_GEN_STKOVF -14 /* stack overflow */
+#define TARGET_GEN_STRLENERR -15 /* string length error */
+#define TARGET_GEN_SUBSTRERR -16 /* substring error */
+#define TARGET_GEN_RANGERR -17 /* range error */
+#define TARGET_GEN_SUBRNG -18
+#define TARGET_GEN_SUBRNG1 -19
+#define TARGET_GEN_SUBRNG2 -20
+#define TARGET_GEN_SUBRNG3 -21
+#define TARGET_GEN_SUBRNG4 -22
+#define TARGET_GEN_SUBRNG5 -23
+#define TARGET_GEN_SUBRNG6 -24
+#define TARGET_GEN_SUBRNG7 -25
+
+#define TARGET_ARCH_HAS_SETUP_FRAME
+#endif /* SW64_TARGET_SIGNAL_H */
diff --git a/linux-user/sw64/target_structs.h b/linux-user/sw64/target_structs.h
new file mode 100644
index 0000000000..7c13dc4bac
--- /dev/null
+++ b/linux-user/sw64/target_structs.h
@@ -0,0 +1,47 @@
+/*
+ * SW64 specific structures for linux-user
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+#ifndef SW64_TARGET_STRUCTS_H
+#define SW64_TARGET_STRUCTS_H
+
+/* TODO: this may need updating; it currently differs from other architectures. */
+struct target_ipc_perm {
+ abi_int __key; /* Key. */
+ abi_uint uid; /* Owner's user ID. */
+ abi_uint gid; /* Owner's group ID. */
+ abi_uint cuid; /* Creator's user ID. */
+ abi_uint cgid; /* Creator's group ID. */
+ abi_uint mode; /* Read/write permission. */
+ abi_ushort __seq; /* Sequence number. */
+ abi_ushort __pad1;
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+struct target_shmid_ds {
+ struct target_ipc_perm shm_perm; /* operation permission struct */
+ abi_long shm_segsz; /* size of segment in bytes */
+ abi_ulong shm_atime; /* time of last shmat() */
+ abi_ulong shm_dtime; /* time of last shmdt() */
+ abi_ulong shm_ctime; /* time of last change by shmctl() */
+ abi_int shm_cpid; /* pid of creator */
+ abi_int shm_lpid; /* pid of last shmop */
+ abi_ulong shm_nattch; /* number of current attaches */
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+#endif
diff --git a/linux-user/sw64/target_syscall.h b/linux-user/sw64/target_syscall.h
new file mode 100644
index 0000000000..c901ae95d8
--- /dev/null
+++ b/linux-user/sw64/target_syscall.h
@@ -0,0 +1,121 @@
+#ifndef SW64_TARGET_SYSCALL_H
+#define SW64_TARGET_SYSCALL_H
+
+/* TODO */
+struct target_pt_regs {
+ abi_ulong r0;
+ abi_ulong r1;
+ abi_ulong r2;
+ abi_ulong r3;
+ abi_ulong r4;
+ abi_ulong r5;
+ abi_ulong r6;
+ abi_ulong r7;
+ abi_ulong r8;
+ abi_ulong r19;
+ abi_ulong r20;
+ abi_ulong r21;
+ abi_ulong r22;
+ abi_ulong r23;
+ abi_ulong r24;
+ abi_ulong r25;
+ abi_ulong r26;
+ abi_ulong r27;
+ abi_ulong r28;
+ abi_ulong hae;
+/* JRP - These are the values provided to a0-a2 by HMcode */
+ abi_ulong trap_a0;
+ abi_ulong trap_a1;
+ abi_ulong trap_a2;
+/* These are saved by HMcode: */
+ abi_ulong ps;
+ abi_ulong pc;
+ abi_ulong gp;
+ abi_ulong r16;
+ abi_ulong r17;
+ abi_ulong r18;
+};
+
+#define TARGET_MLOCKALL_MCL_CURRENT 0x2000
+#define TARGET_MLOCKALL_MCL_FUTURE 0x4000
+
+
+#define UNAME_MACHINE "sw64"
+#define UNAME_MINIMUM_RELEASE "2.6.32"
+#undef TARGET_EOPNOTSUPP
+#define TARGET_EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
+#define SWCR_STATUS_INV0 (1UL<<17)
+#define SWCR_STATUS_DZE0 (1UL<<18)
+#define SWCR_STATUS_OVF0 (1UL<<19)
+#define SWCR_STATUS_UNF0 (1UL<<20)
+#define SWCR_STATUS_INE0 (1UL<<21)
+#define SWCR_STATUS_DNO0 (1UL<<22)
+
+#define SWCR_STATUS_MASK0 (SWCR_STATUS_INV0 | SWCR_STATUS_DZE0 | \
+ SWCR_STATUS_OVF0 | SWCR_STATUS_UNF0 | \
+ SWCR_STATUS_INE0 | SWCR_STATUS_DNO0)
+
+#define SWCR_STATUS0_TO_EXCSUM_SHIFT 16
+
+#define SWCR_STATUS_INV1 (1UL<<23)
+#define SWCR_STATUS_DZE1 (1UL<<24)
+#define SWCR_STATUS_OVF1 (1UL<<25)
+#define SWCR_STATUS_UNF1 (1UL<<26)
+#define SWCR_STATUS_INE1 (1UL<<27)
+#define SWCR_STATUS_DNO1 (1UL<<28)
+
+#define SWCR_STATUS_MASK1 (SWCR_STATUS_INV1 | SWCR_STATUS_DZE1 | \
+ SWCR_STATUS_OVF1 | SWCR_STATUS_UNF1 | \
+ SWCR_STATUS_INE1 | SWCR_STATUS_DNO1)
+
+#define SWCR_STATUS1_TO_EXCSUM_SHIFT 22
+#define SWCR_STATUS_INV2 (1UL<<34)
+#define SWCR_STATUS_DZE2 (1UL<<35)
+#define SWCR_STATUS_OVF2 (1UL<<36)
+#define SWCR_STATUS_UNF2 (1UL<<37)
+#define SWCR_STATUS_INE2 (1UL<<38)
+#define SWCR_STATUS_DNO2 (1UL<<39)
+
+#define SWCR_STATUS_MASK2 (SWCR_STATUS_INV2 | SWCR_STATUS_DZE2 | \
+ SWCR_STATUS_OVF2 | SWCR_STATUS_UNF2 | \
+ SWCR_STATUS_INE2 | SWCR_STATUS_DNO2)
+#define SWCR_STATUS_INV3 (1UL<<40)
+#define SWCR_STATUS_DZE3 (1UL<<41)
+#define SWCR_STATUS_OVF3 (1UL<<42)
+#define SWCR_STATUS_UNF3 (1UL<<43)
+#define SWCR_STATUS_INE3 (1UL<<44)
+#define SWCR_STATUS_DNO3 (1UL<<45)
+
+#define SWCR_STATUS_MASK3 (SWCR_STATUS_INV3 | SWCR_STATUS_DZE3 | \
+ SWCR_STATUS_OVF3 | SWCR_STATUS_UNF3 | \
+ SWCR_STATUS_INE3 | SWCR_STATUS_DNO3)
+#define SWCR_TRAP_ENABLE_INV (1UL<<1) /* invalid op */
+#define SWCR_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */
+#define SWCR_TRAP_ENABLE_OVF (1UL<<3) /* overflow */
+#define SWCR_TRAP_ENABLE_UNF (1UL<<4) /* underflow */
+#define SWCR_TRAP_ENABLE_INE (1UL<<5) /* inexact */
+#define SWCR_TRAP_ENABLE_DNO (1UL<<6) /* denorm */
+#define SWCR_TRAP_ENABLE_MASK (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | \
+ SWCR_TRAP_ENABLE_OVF | SWCR_TRAP_ENABLE_UNF | \
+ SWCR_TRAP_ENABLE_INE | SWCR_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define SWCR_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
+#define SWCR_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
+
+#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ)
+
+/* status bits coming from fpcr: */
+#define SWCR_STATUS_INV (1UL<<17)
+#define SWCR_STATUS_DZE (1UL<<18)
+#define SWCR_STATUS_OVF (1UL<<19)
+#define SWCR_STATUS_UNF (1UL<<20)
+#define SWCR_STATUS_INE (1UL<<21)
+#define SWCR_STATUS_DNO (1UL<<22)
+
+#define SWCR_STATUS_MASK (SWCR_STATUS_INV | SWCR_STATUS_DZE | \
+ SWCR_STATUS_OVF | SWCR_STATUS_UNF | \
+ SWCR_STATUS_INE | SWCR_STATUS_DNO)
+#define TARGET_GSI_IEEE_FP_CONTROL 45
+#define TARGET_SSI_IEEE_FP_CONTROL 14
+#endif
diff --git a/linux-user/sw64/termbits.h b/linux-user/sw64/termbits.h
new file mode 100644
index 0000000000..37dd77120c
--- /dev/null
+++ b/linux-user/sw64/termbits.h
@@ -0,0 +1,265 @@
+typedef unsigned char target_cc_t;
+typedef unsigned int target_speed_t;
+typedef unsigned int target_tcflag_t;
+
+#define TARGET_NCCS 19
+struct target_termios {
+ target_tcflag_t c_iflag; /* input mode flags */
+ target_tcflag_t c_oflag; /* output mode flags */
+ target_tcflag_t c_cflag; /* control mode flags */
+ target_tcflag_t c_lflag; /* local mode flags */
+ target_cc_t c_cc[TARGET_NCCS]; /* control characters */
+ target_cc_t c_line; /* line discipline (== c_cc[19]) */
+ target_speed_t c_ispeed; /* input speed */
+ target_speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define TARGET_VEOF 0
+#define TARGET_VEOL 1
+#define TARGET_VEOL2 2
+#define TARGET_VERASE 3
+#define TARGET_VWERASE 4
+#define TARGET_VKILL 5
+#define TARGET_VREPRINT 6
+#define TARGET_VSWTC 7
+#define TARGET_VINTR 8
+#define TARGET_VQUIT 9
+#define TARGET_VSUSP 10
+#define TARGET_VSTART 12
+#define TARGET_VSTOP 13
+#define TARGET_VLNEXT 14
+#define TARGET_VDISCARD 15
+#define TARGET_VMIN 16
+#define TARGET_VTIME 17
+
+/* c_iflag bits */
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IXON 0001000
+#define TARGET_IXOFF 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IUCLC 0010000
+#define TARGET_IMAXBEL 0020000
+#define TARGET_IUTF8 0040000
+
+/* c_oflag bits */
+#define TARGET_OPOST 0000001
+#define TARGET_ONLCR 0000002
+#define TARGET_OLCUC 0000004
+
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+
+#define TARGET_OFILL 00000100
+#define TARGET_OFDEL 00000200
+#define TARGET_NLDLY 00001400
+#define TARGET_NL0 00000000
+#define TARGET_NL1 00000400
+#define TARGET_NL2 00001000
+#define TARGET_NL3 00001400
+#define TARGET_TABDLY 00006000
+#define TARGET_TAB0 00000000
+#define TARGET_TAB1 00002000
+#define TARGET_TAB2 00004000
+#define TARGET_TAB3 00006000
+#define TARGET_CRDLY 00030000
+#define TARGET_CR0 00000000
+#define TARGET_CR1 00010000
+#define TARGET_CR2 00020000
+#define TARGET_CR3 00030000
+#define TARGET_FFDLY 00040000
+#define TARGET_FF0 00000000
+#define TARGET_FF1 00040000
+#define TARGET_BSDLY 00100000
+#define TARGET_BS0 00000000
+#define TARGET_BS1 00100000
+#define TARGET_VTDLY 00200000
+#define TARGET_VT0 00000000
+#define TARGET_VT1 00200000
+#define TARGET_XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */
+
+/* c_cflag bit meaning */
+#define TARGET_CBAUD 0000037
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
+#define TARGET_EXTA TARGET_B19200
+#define TARGET_EXTB TARGET_B38400
+#define TARGET_CBAUDEX 0000000
+#define TARGET_B57600 00020
+#define TARGET_B115200 00021
+#define TARGET_B230400 00022
+#define TARGET_B460800 00023
+#define TARGET_B500000 00024
+#define TARGET_B576000 00025
+#define TARGET_B921600 00026
+#define TARGET_B1000000 00027
+#define TARGET_B1152000 00030
+#define TARGET_B1500000 00031
+#define TARGET_B2000000 00032
+#define TARGET_B2500000 00033
+#define TARGET_B3000000 00034
+#define TARGET_B3500000 00035
+#define TARGET_B4000000 00036
+
+#define TARGET_CSIZE 00001400
+#define TARGET_CS5 00000000
+#define TARGET_CS6 00000400
+#define TARGET_CS7 00001000
+#define TARGET_CS8 00001400
+
+#define TARGET_CSTOPB 00002000
+#define TARGET_CREAD 00004000
+#define TARGET_PARENB 00010000
+#define TARGET_PARODD 00020000
+#define TARGET_HUPCL 00040000
+
+#define TARGET_CLOCAL 00100000
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define TARGET_ISIG 0x00000080
+#define TARGET_ICANON 0x00000100
+#define TARGET_XCASE 0x00004000
+#define TARGET_ECHO 0x00000008
+#define TARGET_ECHOE 0x00000002
+#define TARGET_ECHOK 0x00000004
+#define TARGET_ECHONL 0x00000010
+#define TARGET_NOFLSH 0x80000000
+#define TARGET_TOSTOP 0x00400000
+#define TARGET_ECHOCTL 0x00000040
+#define TARGET_ECHOPRT 0x00000020
+#define TARGET_ECHOKE 0x00000001
+#define TARGET_FLUSHO 0x00800000
+#define TARGET_PENDIN 0x20000000
+#define TARGET_IEXTEN 0x00000400
+
+#define TARGET_FIOCLEX TARGET_IO('f', 1)
+#define TARGET_FIONCLEX TARGET_IO('f', 2)
+#define TARGET_FIOASYNC TARGET_IOW('f', 125, int)
+#define TARGET_FIONBIO TARGET_IOW('f', 126, int)
+#define TARGET_FIONREAD TARGET_IOR('f', 127, int)
+#define TARGET_TIOCINQ TARGET_FIONREAD
+#define TARGET_FIOQSIZE TARGET_IOR('f', 128, loff_t)
+
+#define TARGET_TIOCGETP TARGET_IOR('t', 8, struct target_sgttyb)
+#define TARGET_TIOCSETP TARGET_IOW('t', 9, struct target_sgttyb)
+#define TARGET_TIOCSETN TARGET_IOW('t', 10, struct target_sgttyb) /* TIOCSETP wo flush */
+
+#define TARGET_TIOCSETC TARGET_IOW('t', 17, struct target_tchars)
+#define TARGET_TIOCGETC TARGET_IOR('t', 18, struct target_tchars)
+#define TARGET_TCGETS TARGET_IOR('t', 19, struct target_termios)
+#define TARGET_TCSETS TARGET_IOW('t', 20, struct target_termios)
+#define TARGET_TCSETSW TARGET_IOW('t', 21, struct target_termios)
+#define TARGET_TCSETSF TARGET_IOW('t', 22, struct target_termios)
+
+#define TARGET_TCGETA TARGET_IOR('t', 23, struct target_termio)
+#define TARGET_TCSETA TARGET_IOW('t', 24, struct target_termio)
+#define TARGET_TCSETAW TARGET_IOW('t', 25, struct target_termio)
+#define TARGET_TCSETAF TARGET_IOW('t', 28, struct target_termio)
+
+#define TARGET_TCSBRK TARGET_IO('t', 29)
+#define TARGET_TCXONC TARGET_IO('t', 30)
+#define TARGET_TCFLSH TARGET_IO('t', 31)
+
+#define TARGET_TIOCSWINSZ TARGET_IOW('t', 103, struct target_winsize)
+#define TARGET_TIOCGWINSZ TARGET_IOR('t', 104, struct target_winsize)
+#define TARGET_TIOCSTART TARGET_IO('t', 110) /* start output, like ^Q */
+#define TARGET_TIOCSTOP TARGET_IO('t', 111) /* stop output, like ^S */
+#define TARGET_TIOCOUTQ TARGET_IOR('t', 115, int) /* output queue size */
+
+#define TARGET_TIOCGLTC TARGET_IOR('t', 116, struct target_ltchars)
+#define TARGET_TIOCSLTC TARGET_IOW('t', 117, struct target_ltchars)
+#define TARGET_TIOCSPGRP TARGET_IOW('t', 118, int)
+#define TARGET_TIOCGPGRP TARGET_IOR('t', 119, int)
+
+#define TARGET_TIOCEXCL 0x540C
+#define TARGET_TIOCNXCL 0x540D
+#define TARGET_TIOCSCTTY 0x540E
+
+#define TARGET_TIOCSTI 0x5412
+#define TARGET_TIOCMGET 0x5415
+#define TARGET_TIOCMBIS 0x5416
+#define TARGET_TIOCMBIC 0x5417
+#define TARGET_TIOCMSET 0x5418
+# define TARGET_TIOCM_LE 0x001
+# define TARGET_TIOCM_DTR 0x002
+# define TARGET_TIOCM_RTS 0x004
+# define TARGET_TIOCM_ST 0x008
+# define TARGET_TIOCM_SR 0x010
+# define TARGET_TIOCM_CTS 0x020
+# define TARGET_TIOCM_CAR 0x040
+# define TARGET_TIOCM_RNG 0x080
+# define TARGET_TIOCM_DSR 0x100
+# define TARGET_TIOCM_CD TARGET_TIOCM_CAR
+# define TARGET_TIOCM_RI TARGET_TIOCM_RNG
+# define TARGET_TIOCM_OUT1 0x2000
+# define TARGET_TIOCM_OUT2 0x4000
+# define TARGET_TIOCM_LOOP 0x8000
+
+#define TARGET_TIOCGSOFTCAR 0x5419
+#define TARGET_TIOCSSOFTCAR 0x541A
+#define TARGET_TIOCLINUX 0x541C
+#define TARGET_TIOCCONS 0x541D
+#define TARGET_TIOCGSERIAL 0x541E
+#define TARGET_TIOCSSERIAL 0x541F
+#define TARGET_TIOCPKT 0x5420
+# define TARGET_TIOCPKT_DATA 0
+# define TARGET_TIOCPKT_FLUSHREAD 1
+# define TARGET_TIOCPKT_FLUSHWRITE 2
+# define TARGET_TIOCPKT_STOP 4
+# define TARGET_TIOCPKT_START 8
+# define TARGET_TIOCPKT_NOSTOP 16
+# define TARGET_TIOCPKT_DOSTOP 32
+
+
+#define TARGET_TIOCNOTTY 0x5422
+#define TARGET_TIOCSETD 0x5423
+#define TARGET_TIOCGETD 0x5424
+#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
+#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
+#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int) /* Lock/unlock Pty */
+#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41) /* Safely open the slave */
+
+#define TARGET_TIOCSERCONFIG 0x5453
+#define TARGET_TIOCSERGWILD 0x5454
+#define TARGET_TIOCSERSWILD 0x5455
+#define TARGET_TIOCGLCKTRMIOS 0x5456
+#define TARGET_TIOCSLCKTRMIOS 0x5457
+#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
diff --git a/meson.build b/meson.build
index 96de1a6ef9..d0bbceffe1 100644
--- a/meson.build
+++ b/meson.build
@@ -56,7 +56,7 @@ python = import('python').find_installation()
supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux']
supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64',
- 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64']
+ 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64', 'sw64']
cpu = host_machine.cpu_family()
@@ -65,6 +65,10 @@ if cpu in ['riscv32', 'riscv64']
cpu = 'riscv'
endif
+if cpu == 'sw_64'
+ cpu = 'sw64'
+endif
+
targetos = host_machine.system()
if cpu in ['x86', 'x86_64']
@@ -77,6 +81,8 @@ elif cpu in ['ppc', 'ppc64']
kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
elif cpu in ['mips', 'mips64']
kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu']
+elif cpu == 'sw64'
+ kvm_targets = ['sw64-softmmu']
else
kvm_targets = []
endif
@@ -359,6 +365,8 @@ if not get_option('tcg').disabled()
tcg_arch = 'i386'
elif config_host['ARCH'] == 'ppc64'
tcg_arch = 'ppc'
+ elif config_host['ARCH'] in ['sw64']
+ tcg_arch = 'sw64'
endif
add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch,
language: ['c', 'cpp', 'objc'])
@@ -1814,6 +1822,7 @@ disassemblers = {
'sh4' : ['CONFIG_SH4_DIS'],
'sparc' : ['CONFIG_SPARC_DIS'],
'xtensa' : ['CONFIG_XTENSA_DIS'],
+ 'sw64' : ['CONFIG_SW64_DIS'],
}
if link_language == 'cpp'
disassemblers += {
@@ -2466,6 +2475,7 @@ if have_system
'hw/sparc',
'hw/sparc64',
'hw/ssi',
+ 'hw/sw64',
'hw/timer',
'hw/tpm',
'hw/usb',
diff --git a/pc-bios/meson.build b/pc-bios/meson.build
index b40ff3f2bd..05e9065ad6 100644
--- a/pc-bios/meson.build
+++ b/pc-bios/meson.build
@@ -38,6 +38,9 @@ blobs = files(
'vgabios-ramfb.bin',
'vgabios-bochs-display.bin',
'vgabios-ati.bin',
+ 'uefi-bios-sw',
+ 'core3-reset',
+ 'core3-hmcode',
'openbios-sparc32',
'openbios-sparc64',
'openbios-ppc',
diff --git a/qapi/machine.json b/qapi/machine.json
index 6822cafe2e..6ed8488255 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -29,7 +29,7 @@
# Since: 3.0
##
{ 'enum' : 'SysEmuTarget',
- 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'cris', 'hppa', 'i386',
+ 'data' : [ 'aarch64', 'alpha', 'sw64', 'arm', 'avr', 'cris', 'hppa', 'i386',
'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64',
'mips64el', 'mipsel', 'nios2', 'or1k', 'ppc',
'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4',
diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c
index 05e1d88d99..142352b24e 100644
--- a/softmmu/qdev-monitor.c
+++ b/softmmu/qdev-monitor.c
@@ -61,7 +61,8 @@ typedef struct QDevAlias
QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \
QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \
QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \
- QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA)
+ QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \
+ QEMU_ARCH_SW64)
#define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X)
#define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K)
diff --git a/target/Kconfig b/target/Kconfig
index ae7f24fc66..a8d6cb1e97 100644
--- a/target/Kconfig
+++ b/target/Kconfig
@@ -17,3 +17,4 @@ source sh4/Kconfig
source sparc/Kconfig
source tricore/Kconfig
source xtensa/Kconfig
+source sw64/Kconfig
diff --git a/target/meson.build b/target/meson.build
index 2f6940255e..ec6bc97331 100644
--- a/target/meson.build
+++ b/target/meson.build
@@ -16,5 +16,6 @@ subdir('rx')
subdir('s390x')
subdir('sh4')
subdir('sparc')
+subdir('sw64')
subdir('tricore')
subdir('xtensa')
diff --git a/target/sw64/Kconfig b/target/sw64/Kconfig
new file mode 100644
index 0000000000..ad50b9677e
--- /dev/null
+++ b/target/sw64/Kconfig
@@ -0,0 +1,2 @@
+config SW64
+ bool
diff --git a/target/sw64/Makefile.objs b/target/sw64/Makefile.objs
new file mode 100644
index 0000000000..1e549d141c
--- /dev/null
+++ b/target/sw64/Makefile.objs
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SOFTMMU) += machine.o
+obj-y += cpu.o translate.o profile.o helper.o
+obj-y += int_helper.o float_helper.o simd_helper.o helper.o exception.o
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/target/sw64/cpu-param.h b/target/sw64/cpu-param.h
new file mode 100644
index 0000000000..978a3cd572
--- /dev/null
+++ b/target/sw64/cpu-param.h
@@ -0,0 +1,24 @@
+/*
+ * SW64 cpu parameters for qemu.
+ *
+ * Copyright (c) 2018 Lin Hainan
+ */
+
+#ifndef SW64_CPU_PARAM_H
+#define SW64_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 64 /* if use th-1 ,TARGET_PAGE_BITS is 12 */
+#define TARGET_PAGE_BITS 13
+
+#ifdef CONFIG_USER_ONLY
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#else
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#endif
+
+#ifndef CONFIG_USER_ONLY
+#define NB_MMU_MODES 4
+#endif
+
+#endif
diff --git a/target/sw64/cpu-qom.h b/target/sw64/cpu-qom.h
new file mode 100644
index 0000000000..b093c2bec8
--- /dev/null
+++ b/target/sw64/cpu-qom.h
@@ -0,0 +1,47 @@
+/*
+ * QEMU SW64 CPU
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef QEMU_SW64_CPU_QOM
+#define QEMU_SW64_CPU_QOM
+
+#include "hw/core/cpu.h"
+
+#define TYPE_SW64_CPU "sw64-cpu"
+
+#define SW64_CPU_CLASS(kclass) \
+ OBJECT_CLASS_CHECK(SW64CPUClass, (kclass), TYPE_SW64_CPU)
+#define SW64_CPU(obj) \
+ OBJECT_CHECK(SW64CPU, (obj), TYPE_SW64_CPU)
+#define SW64_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SW64CPUClass, (obj), TYPE_SW64_CPU)
+
+/**
+ * SW64CPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An SW64 CPU model.
+ */
+typedef struct SW64CPUClass {
+ /* private */
+ CPUClass parent_class;
+ /* public */
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+} SW64CPUClass;
+
+typedef struct SW64CPU SW64CPU;
+#endif
diff --git a/target/sw64/cpu.c b/target/sw64/cpu.c
new file mode 100644
index 0000000000..89c21850e1
--- /dev/null
+++ b/target/sw64/cpu.c
@@ -0,0 +1,457 @@
+/*
+ * QEMU SW64 CPU
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "disas/dis-asm.h"
+#include "kvm_sw64.h"
+#include "sysemu/reset.h"
+#include "hw/qdev-properties.h"
+
+
+static void sw64_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ SW64CPU *cpu = SW64_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static void sw64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+#ifndef CONFIG_KVM
+ SW64CPU *cpu = SW64_CPU(cs);
+ CPUSW64State *env = &cpu->env;
+ int i;
+
+ static const char ireg_names[31][4] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1",
+ "s2", "s3", "s4", "s5", "fp", "a0", "a1", "a2", "a3", "a4", "a5",
+ "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp"};
+ static const char freg_names[128][4] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
+ "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
+ "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
+ "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17",
+ "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27",
+ "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5",
+ "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25",
+ "f26", "f27", "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3",
+ "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+ qemu_fprintf(f, "PC=%016" PRIx64 " SP=%016" PRIx64 "\n", env->pc,
+ env->ir[IDX_SP]);
+ for (i = 0; i < 31; i++) {
+ qemu_fprintf(f, "%s=%016" PRIx64, ireg_names[i], env->ir[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+ qemu_fprintf(f, "\n");
+#ifndef CONFIG_USER_ONLY
+ static const char sreg_names[10][4] = {"p1", "p2", "p4", "p5", "p6",
+ "p7", "p20", "p21", "p22", "p23"};
+ for (i = 0; i < 10; i++) {
+ qemu_fprintf(f, "%s=%016" PRIx64, sreg_names[i], env->sr[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+ qemu_fprintf(f, "\n");
+#endif
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, "%s=%016" PRIx64, freg_names[i + 96], env->fr[i + 96]);
+ qemu_fprintf(f, " %016" PRIx64, env->fr[i + 64]);
+ qemu_fprintf(f, " %016" PRIx64, env->fr[i + 32]);
+ qemu_fprintf(f, " %016" PRIx64, env->fr[i]);
+ qemu_fprintf(f, "\n");
+ }
+ qemu_fprintf(f, "\n");
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static void sw64_machine_cpu_reset(void *opaque)
+{
+ SW64CPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+}
+#endif
+
+static void sw64_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ SW64CPUClass *scc = SW64_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+#ifndef CONFIG_USER_ONLY
+ qemu_register_reset(sw64_machine_cpu_reset, cs);
+#endif
+
+ qemu_init_vcpu(cs);
+
+ scc->parent_realize(dev, errp);
+}
+
+static void sw64_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+ info->mach = bfd_mach_sw_64_core3;
+ info->print_insn = print_insn_sw_64;
+}
+
+#include "fpu/softfloat.h"
+
+static void core3_init(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ CPUSW64State *env = cs->env_ptr;
+#ifdef CONFIG_USER_ONLY
+ env->fpcr = 0x680e800000000000;
+ parallel_cpus = true;
+#endif
+ set_feature(env, SW64_FEATURE_CORE3);
+}
+
+static ObjectClass *sw64_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+ char **cpuname;
+
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ typename = g_strdup_printf(SW64_CPU_TYPE_NAME("%s"), cpu_model);
+
+ oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
+ g_free(typename);
+
+ if (oc && object_class_dynamic_cast(oc, TYPE_SW64_CPU) &&
+ !object_class_is_abstract(oc)) {
+ return oc;
+ }
+ return NULL;
+}
+
+bool sw64_cpu_has_work(CPUState *cs)
+{
+ /* If CPU has gotten into asleep(halt), then it may be
+ * wake up by hard interrupt, timer, ii, mail or mchk.
+ */
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER |
+ CPU_INTERRUPT_IIMAIL | CPU_INTERRUPT_MCHK);
+}
+
+static void sw64_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ SW64CPU *cpu = SW64_CPU(obj);
+ CPUSW64State *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+
+ cs->env_ptr = env;
+#ifndef CONFIG_USER_ONLY
+ env->flags = ENV_FLAG_HM_MODE;
+#else
+ env->flags = ENV_FLAG_PS_USER;
+#endif
+ tlb_flush(cs);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void sw64_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+#ifdef DEBUG_TRANS
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr, true);
+ }
+ fprintf(stderr, "PC = %lx, Wrong IO addr. Hwaddr = %lx, vaddr = %lx, access_type = %d\n",
+ env->pc, physaddr, addr, access_type);
+#endif
+}
+#endif
+
+#define a0(func) (((func & 0xFF) >> 6) & 0x1)
+#define a1(func) ((((func & 0xFF) >> 6) & 0x2) >> 1)
+
+#define t(func) ((a0(func) ^ a1(func)) & 0x1)
+#define b0(func) (t(func) | a0(func))
+#define b1(func) ((~t(func) & 1) | a1(func))
+
+#define START_SYS_CALL_ADDR(func) \
+ (b1(func) << 14) | (b0(func) << 13) | ((func & 0x3F) << 7)
+
+static void sw64_cpu_do_interrupt(CPUState *cs)
+{
+ int i = cs->exception_index;
+
+ cs->exception_index = -1;
+#if !defined(CONFIG_USER_ONLY)
+ SW64CPU *cpu = SW64_CPU(cs);
+ CPUSW64State *env = &cpu->env;
+ switch (i) {
+ case EXCP_OPCDEC:
+ cpu_abort(cs, "ILLEGAL INSN");
+ break;
+ case EXCP_CALL_SYS:
+ i = START_SYS_CALL_ADDR(env->error_code);
+ if (i <= 0x3F) {
+ i += 0x4000;
+ } else if (i >= 0x40 && i <= 0x7F) {
+ i += 0x2000;
+ } else if (i >= 0x80 && i <= 0x8F) {
+ i += 0x6000;
+ }
+ break;
+ case EXCP_ARITH:
+ env->error_code = -1;
+ env->csr[EXC_PC] = env->pc - 4;
+ env->csr[EXC_SUM] = 1;
+ i = 0xB80;
+ break;
+ case EXCP_UNALIGN:
+ i = 0xB00;
+ env->csr[EXC_PC] = env->pc - 4;
+ break;
+ case EXCP_CLK_INTERRUPT:
+ case EXCP_DEV_INTERRUPT:
+ i = 0xE80;
+ break;
+ case EXCP_MMFAULT:
+ i = 0x980;
+ env->csr[EXC_PC] = env->pc;
+ break;
+ case EXCP_IIMAIL:
+ env->csr[EXC_PC] = env->pc;
+ i = 0xE00;
+ break;
+ default:
+ break;
+ }
+ env->pc = env->hm_entry + i;
+ env->flags = ENV_FLAG_HM_MODE;
+#else
+ switch (i) {
+ case EXCP_OPCDEC:
+ cpu_abort(cs, "ILLEGAL INSN");
+ break;
+ case EXCP_CALL_SYS:
+ default:
+ break;
+ }
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool sw64_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ SW64CPU *cpu = SW64_CPU(cs);
+ CPUSW64State *env = &cpu->env;
+ int idx = -1;
+ /* We never take interrupts while in Hardmode. */
+ if (env->flags & ENV_FLAG_HM_MODE)
+ return false;
+
+ if (interrupt_request & CPU_INTERRUPT_IIMAIL) {
+ idx = EXCP_IIMAIL;
+ env->csr[INT_STAT] |= 1UL << 6;
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
+ return false;
+ cs->interrupt_request &= ~CPU_INTERRUPT_IIMAIL;
+ goto done;
+ }
+
+ if (interrupt_request & CPU_INTERRUPT_TIMER) {
+ idx = EXCP_CLK_INTERRUPT;
+ env->csr[INT_STAT] |= 1UL << 4;
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
+ return false;
+ cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
+ goto done;
+ }
+
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ idx = EXCP_DEV_INTERRUPT;
+ env->csr[INT_STAT] |= 1UL << 12;
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
+ return false;
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ goto done;
+ }
+
+ if (interrupt_request & CPU_INTERRUPT_PCIE) {
+ idx = EXCP_DEV_INTERRUPT;
+ env->csr[INT_STAT] |= 1UL << 1;
+ env->csr[INT_PCI_INT] = 0x10;
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
+ return false;
+ cs->interrupt_request &= ~CPU_INTERRUPT_PCIE;
+ goto done;
+ }
+
+done:
+ if (idx >= 0) {
+ cs->exception_index = idx;
+ env->error_code = 0;
+ env->csr[EXC_PC] = env->pc;
+ sw64_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+#endif
+
+static void sw64_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ SW64CPU *cpu = SW64_CPU(s);
+ SW64CPUClass *scc = SW64_CPU_GET_CLASS(cpu);
+
+ scc->parent_reset(dev);
+
+#ifndef CONFIG_USER_ONLY
+ if (kvm_enabled()) {
+ kvm_sw64_reset_vcpu(cpu);
+ }
+#endif
+}
+
+static Property sw64_cpu_properties[] = {
+#ifdef CONFIG_USER_ONLY
+ /* apic_id = 0 by default for *-user, see commit 9886e834 */
+ DEFINE_PROP_UINT32("cid", SW64CPU, cid, 0),
+#else
+ DEFINE_PROP_UINT32("cid", SW64CPU, cid, 0xFFFFFFFF),
+#endif
+ DEFINE_PROP_END_OF_LIST()
+};
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps sw64_sysemu_ops = {
+ .get_phys_page_debug = sw64_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps sw64_tcg_ops = {
+#ifdef CONFIG_TCG
+ .initialize = sw64_translate_init,
+ .tlb_fill = sw64_cpu_tlb_fill,
+#endif /* CONFIG_TCG */
+
+#if !defined(CONFIG_USER_ONLY)
+ .do_unaligned_access = sw64_cpu_do_unaligned_access,
+ .cpu_exec_interrupt = sw64_cpu_exec_interrupt,
+ .do_transaction_failed = sw64_cpu_do_transaction_failed,
+#endif /* !CONFIG_USER_ONLY */
+ .do_interrupt = sw64_cpu_do_interrupt,
+};
+
+static void sw64_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ SW64CPUClass *scc = SW64_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, sw64_cpu_realizefn,
+ &scc->parent_realize);
+ device_class_set_parent_reset(dc, sw64_cpu_reset, &scc->parent_reset);
+ device_class_set_props(dc, sw64_cpu_properties);
+
+ cc->class_by_name = sw64_cpu_class_by_name;
+ dc->vmsd = &vmstate_sw64_cpu;
+ cc->has_work = sw64_cpu_has_work;
+ cc->set_pc = sw64_cpu_set_pc;
+ cc->disas_set_info = sw64_cpu_disas_set_info;
+ cc->dump_state = sw64_cpu_dump_state;
+ cc->tcg_ops = &sw64_tcg_ops;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &sw64_sysemu_ops;
+#endif
+}
+
+static const SW64CPUInfo sw64_cpus[] =
+{
+ {
+ .name = "core3",
+ .initfn = core3_init,
+ },
+ {
+ .name = NULL
+ },
+};
+
+static void cpu_register(const SW64CPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_SW64_CPU,
+ .instance_size = sizeof(SW64CPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(SW64CPUClass),
+ .class_init = info->class_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_SW64_CPU, info->name);
+ type_register(&type_info);
+ g_free((void*)type_info.name);
+}
+
+static const TypeInfo sw64_cpu_type_info = {
+ .name = TYPE_SW64_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(SW64CPU),
+ .instance_init = sw64_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(SW64CPUClass),
+ .class_init = sw64_cpu_class_init,
+};
+
+static void sw64_cpu_register_types(void)
+{
+ const SW64CPUInfo *info = sw64_cpus;
+
+ type_register_static(&sw64_cpu_type_info);
+
+ while (info->name) {
+ cpu_register(info);
+ info++;
+ }
+}
+
+type_init(sw64_cpu_register_types)
diff --git a/target/sw64/cpu.h b/target/sw64/cpu.h
new file mode 100644
index 0000000000..5a490e2b4a
--- /dev/null
+++ b/target/sw64/cpu.h
@@ -0,0 +1,406 @@
+/*
+ * SW64 emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+#ifndef SW64_CPU_H
+#define SW64_CPU_H
+
+#include "cpu-qom.h"
+#include "fpu/softfloat.h"
+#include "profile.h"
+
+/* QEMU addressing/paging config */
+#define TARGET_PAGE_BITS 13
+#define TARGET_LONG_BITS 64
+#define TARGET_LEVEL_BITS 10
+//#define ALIGNED_ONLY
+
+#include "exec/cpu-defs.h"
+
+/* FIXME: LOCKFIX */
+#define SW64_FIXLOCK 1
+
+/* swcore processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
+#define SOFTMMU 1
+
+#ifndef CONFIG_USER_ONLY
+#define MMU_MODE0_SUFFIX _phys
+#define MMU_MODE3_SUFFIX _user
+#define MMU_MODE2_SUFFIX _kernel
+#endif
+#define MMU_PHYS_IDX 0
+#define MMU_KERNEL_IDX 2
+#define MMU_USER_IDX 3
+
+/* FIXME:Bits 4 and 5 are the mmu mode. The VMS hmcode uses all 4 modes;
+ The Unix hmcode only uses bit 4. */
+#define PS_USER_MODE 8u
+
+#define ENV_FLAG_HM_SHIFT 0
+#define ENV_FLAG_PS_SHIFT 8
+#define ENV_FLAG_FEN_SHIFT 24
+
+#define ENV_FLAG_HM_MODE (1u << ENV_FLAG_HM_SHIFT)
+#define ENV_FLAG_PS_USER (PS_USER_MODE << ENV_FLAG_PS_SHIFT)
+#define ENV_FLAG_FEN (1u << ENV_FLAG_FEN_SHIFT)
+
+#define MCU_CLOCK 25000000
+
+typedef struct CPUSW64State CPUSW64State;
+typedef CPUSW64State CPUArchState;
+typedef SW64CPU ArchCPU;
+
+struct CPUSW64State {
+ uint64_t ir[32];
+ uint64_t fr[128];
+ uint64_t pc;
+ bool is_slave;
+
+ uint64_t csr[0x100];
+ uint64_t fpcr;
+ uint64_t fpcr_exc_enable;
+ uint8_t fpcr_round_mode;
+ uint8_t fpcr_flush_to_zero;
+
+ float_status fp_status;
+
+ uint64_t hm_entry;
+
+#if !defined(CONFIG_USER_ONLY)
+ uint64_t sr[10]; /* shadow regs 1,2,4-7,20-23 */
+#endif
+
+ uint32_t flags;
+ uint64_t error_code;
+ uint64_t unique;
+ uint64_t lock_addr;
+ uint64_t lock_valid;
+ uint64_t lock_flag;
+ uint64_t lock_success;
+#ifdef SW64_FIXLOCK
+ uint64_t lock_value;
+#endif
+
+ uint64_t trap_arg0;
+ uint64_t trap_arg1;
+ uint64_t trap_arg2;
+
+ uint64_t features;
+ uint64_t insn_count[537];
+
+ /* reserve for slave */
+ uint64_t ca[4];
+ uint64_t scala_gpr[64];
+ uint64_t vec_gpr[224];
+ uint64_t fpcr_base;
+ uint64_t fpcr_ext;
+ uint64_t pendding_flag;
+ uint64_t pendding_status;
+ uint64_t synr_pendding_status;
+ uint64_t sync_pendding_status;
+ uint8_t vlenma_idxa;
+ uint8_t stable;
+};
+#define SW64_FEATURE_CORE3 0x2
+
+static inline void set_feature(CPUSW64State *env, int feature)
+{
+ env->features |= feature;
+}
+
+/**
+ * SW64CPU:
+ * @env: #CPUSW64State
+ *
+ * An SW64 CPU
+ */
+struct SW64CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+ CPUNegativeOffsetState neg;
+ CPUSW64State env;
+
+ uint64_t k_regs[158];
+ uint64_t k_vcb[36];
+ QEMUTimer *alarm_timer;
+ target_ulong irq;
+ uint32_t cid;
+};
+
+enum {
+ IDX_V0 = 0,
+ IDX_T0 = 1,
+ IDX_T1 = 2,
+ IDX_T2 = 3,
+ IDX_T3 = 4,
+ IDX_T4 = 5,
+ IDX_T5 = 6,
+ IDX_T6 = 7,
+ IDX_T7 = 8,
+ IDX_S0 = 9,
+ IDX_S1 = 10,
+ IDX_S2 = 11,
+ IDX_S3 = 12,
+ IDX_S4 = 13,
+ IDX_S5 = 14,
+ IDX_S6 = 15,
+ IDX_FP = IDX_S6,
+ IDX_A0 = 16,
+ IDX_A1 = 17,
+ IDX_A2 = 18,
+ IDX_A3 = 19,
+ IDX_A4 = 20,
+ IDX_A5 = 21,
+ IDX_T8 = 22,
+ IDX_T9 = 23,
+ IDX_T10 = 24,
+ IDX_T11 = 25,
+ IDX_RA = 26,
+ IDX_T12 = 27,
+ IDX_PV = IDX_T12,
+ IDX_AT = 28,
+ IDX_GP = 29,
+ IDX_SP = 30,
+ IDX_ZERO = 31,
+};
+
+enum {
+ MM_K_TNV = 0x0,
+ MM_K_ACV = 0x1,
+ MM_K_FOR = 0x2,
+ MM_K_FOE = 0x3,
+ MM_K_FOW = 0x4
+};
+
+enum {
+ PTE_VALID = 0x0001,
+ PTE_FOR = 0x0002, /* used for page protection (fault on read) */
+ PTE_FOW = 0x0004, /* used for page protection (fault on write) */
+ PTE_FOE = 0x0008,
+ PTE_KS = 0x0010,
+ PTE_PSE = 0x0040,
+ PTE_GH = 0x0060,
+ PTE_HRE = 0x0100,
+ PTE_VRE = 0x0200,
+ PTE_KRE = 0x0400,
+ PTE_URE = 0x0800,
+ PTE_HWE = 0x1000,
+ PTE_VWE = 0x2000,
+ PTE_KWE = 0x4000,
+ PTE_UWE = 0x8000
+};
+
+/*
+ * Select the MMU index for the current mode: HM (hmcode) mode runs with
+ * translation disabled (MMU_PHYS_IDX); otherwise user vs. kernel is chosen
+ * by the PS_USER flag.  'ifetch' is unused.
+ */
+static inline int cpu_mmu_index(CPUSW64State *env, bool ifetch)
+{
+    int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
+    if (env->flags & ENV_FLAG_HM_MODE) {
+        ret = MMU_PHYS_IDX;
+    }
+    return ret;
+}
+
+/* Recover the owning SW64CPU from its embedded CPUSW64State. */
+static inline SW64CPU *sw64_env_get_cpu(CPUSW64State *env)
+{
+    return container_of(env, SW64CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(sw64_env_get_cpu(e))
+#define ENV_OFFSET offsetof(SW64CPU, env)
+
+#define cpu_init(cpu_model) cpu_generic_init(TYPE_SW64_CPU, cpu_model)
+
+#define SW64_CPU_TYPE_SUFFIX "-" TYPE_SW64_CPU
+#define SW64_CPU_TYPE_NAME(name) (name SW64_CPU_TYPE_SUFFIX)
+/* CPUClass/TCG hooks implemented in helper.c, exception.c and machine code. */
+int cpu_sw64_signal_handler(int host_signum, void *pinfo, void *puc);
+bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr);
+uint64_t sw64_ldl_phys(CPUState *cs, hwaddr addr);
+hwaddr sw64_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
+void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val);
+uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr);
+void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val);
+uint64_t cpu_sw64_load_fpcr(CPUSW64State *env);
+void cpu_sw64_store_fpcr(CPUSW64State *env, uint64_t val);
+void sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                  MMUAccessType access_type, int mmu_idx,
+                                  uintptr_t retaddr) QEMU_NORETURN;
+bool sw64_cpu_has_work(CPUState *cs);
+extern struct VMStateDescription vmstate_sw64_cpu; /* NOTE(review): QEMU convention is 'const VMStateDescription' -- confirm */
+
+/* SW64-specific interrupt pending bits */
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_IIMAIL CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_PCIE CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_WAKEUP CPU_INTERRUPT_TGT_EXT_3 /* NOTE(review): shares TGT_EXT_3 with PCIE -- confirm intended */
+#define CPU_INTERRUPT_SLAVE CPU_INTERRUPT_TGT_EXT_4
+
+#define cpu_signal_handler cpu_sw64_signal_handler
+#define CPU_RESOLVING_TYPE TYPE_SW64_CPU
+
+/* CSR index table: SWCSR(name, index) expands to 'name = index'. */
+#define SWCSR(x, y) x = y
+enum {
+    SWCSR(ITB_TAG, 0x0),
+    SWCSR(ITB_PTE, 0x1),
+    SWCSR(ITB_IA, 0x2),
+    SWCSR(ITB_IV, 0x3),
+    SWCSR(ITB_IVP, 0x4),
+    SWCSR(ITB_IU, 0x5),
+    SWCSR(ITB_IS, 0x6),
+    SWCSR(EXC_SUM, 0xd),  /* exception summary, written by trap helpers */
+    SWCSR(EXC_PC, 0xe),
+    SWCSR(DS_STAT, 0x48), /* data-stream fault status (MM_K_* codes) */
+    SWCSR(CID, 0xc4),
+    SWCSR(TID, 0xc7),
+
+    SWCSR(DTB_TAG, 0x40),
+    SWCSR(DTB_PTE, 0x41),
+    SWCSR(DTB_IA, 0x42),
+    SWCSR(DTB_IV, 0x43),
+    SWCSR(DTB_IVP, 0x44),
+    SWCSR(DTB_IU, 0x45),
+    SWCSR(DTB_IS, 0x46),
+    SWCSR(II_REQ, 0x82),
+
+    SWCSR(PTBR, 0x8),     /* page-table base, root of the software TLB walk */
+    SWCSR(PRI_BASE, 0x10),
+    SWCSR(TIMER_CTL, 0x2a),
+    SWCSR(INT_STAT, 0x30),
+    SWCSR(INT_CLR, 0x31),
+    SWCSR(IER, 0x32),
+    SWCSR(INT_PCI_INT, 0x33),
+    SWCSR(DVA, 0x4e),     /* faulting data virtual address */
+};
+
+#include "exec/cpu-all.h"
+/* TCG hook: TB key is (env->pc, flags); SW64 uses no cs_base. */
+static inline void cpu_get_tb_cpu_state(CPUSW64State *env, target_ulong *pc,
+                                        target_ulong *cs_base, uint32_t *pflags)
+{
+    *pc = env->pc;
+    *cs_base = 0;
+    *pflags = env->flags;
+}
+
+void sw64_translate_init(void);
+
+/* Exception numbers stored in cs->exception_index. */
+enum {
+    EXCP_NONE,
+    EXCP_HALT,
+    EXCP_IIMAIL,    /* inter-processor mailbox interrupt */
+    EXCP_OPCDEC,    /* illegal/reserved opcode */
+    EXCP_CALL_SYS,
+    EXCP_ARITH,     /* arithmetic/FP trap; detail in the EXC_SUM CSR */
+    EXCP_UNALIGN,
+#ifdef SOFTMMU /* NOTE(review): QEMU spells this CONFIG_SOFTMMU; helper.c relies on EXCP_MMFAULT existing, so confirm where SOFTMMU is defined */
+    EXCP_MMFAULT,
+#else
+    EXCP_DTBD,
+    EXCP_DTBS_U,
+    EXCP_DTBS_K,
+    EXCP_ITB_U,
+    EXCP_ITB_K,
+#endif
+    EXCP_CLK_INTERRUPT,
+    EXCP_DEV_INTERRUPT,
+    EXCP_SLAVE,
+};
+
+/*
+ * Declare shift (_S), width (_V) and right-aligned mask (_M) constants for a
+ * CSR bit field, e.g. FPCR_DYN_S / FPCR_DYN_V / FPCR_DYN_M.
+ */
+#define CSR_SHIFT_AND_MASK(name, func, shift, bits) \
+    name##_##func##_S = shift, \
+    name##_##func##_V = bits, \
+    name##_##func##_M = (1UL << bits) - 1
+
+/* In-place (shifted) mask for an FPCR field. */
+#define FPCR_MASK(name) ((uint64_t)FPCR_##name##_M << FPCR_##name##_S)
+/* FPCR */
+enum {
+    CSR_SHIFT_AND_MASK(FPCR, EXC_CTL, 0, 2),
+    CSR_SHIFT_AND_MASK(FPCR, EXC_CTL_WEN, 2, 1), /* write-enable for EXC_CTL, see helper_setfpcrx() */
+    CSR_SHIFT_AND_MASK(FPCR, RSV0, 3, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INV3, 4, 1),
+    CSR_SHIFT_AND_MASK(FPCR, ZERO0, 5, 1),
+    CSR_SHIFT_AND_MASK(FPCR, OVF3, 6, 1),
+    CSR_SHIFT_AND_MASK(FPCR, UNF3, 7, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INE3, 8, 1),
+    CSR_SHIFT_AND_MASK(FPCR, ZERO1, 9, 1),
+    CSR_SHIFT_AND_MASK(FPCR, RSV1, 10, 10),
+    CSR_SHIFT_AND_MASK(FPCR, INV2, 20, 1),
+    CSR_SHIFT_AND_MASK(FPCR, ZERO2, 21, 1),
+    CSR_SHIFT_AND_MASK(FPCR, OVF2, 22, 1),
+    CSR_SHIFT_AND_MASK(FPCR, UNF2, 23, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INE2, 24, 1),
+    CSR_SHIFT_AND_MASK(FPCR, ZERO3, 25, 1),
+    CSR_SHIFT_AND_MASK(FPCR, RSV2, 26, 10),
+    CSR_SHIFT_AND_MASK(FPCR, INV1, 36, 1),
+    CSR_SHIFT_AND_MASK(FPCR, ZERO4, 37, 1),
+    CSR_SHIFT_AND_MASK(FPCR, OVF1, 38, 1),
+    CSR_SHIFT_AND_MASK(FPCR, UNF1, 39, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INE1, 40, 1),
+    CSR_SHIFT_AND_MASK(FPCR, ZERO5, 41, 1),
+    CSR_SHIFT_AND_MASK(FPCR, RSV3, 42, 6),
+    CSR_SHIFT_AND_MASK(FPCR, DNZ, 48, 1),  /* denormals-are-zero */
+    CSR_SHIFT_AND_MASK(FPCR, INVD, 49, 1), /* *D bits disable (mask) the matching trap */
+    CSR_SHIFT_AND_MASK(FPCR, DZED, 50, 1),
+    CSR_SHIFT_AND_MASK(FPCR, OVFD, 51, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INV0, 52, 1),
+    CSR_SHIFT_AND_MASK(FPCR, DZE0, 53, 1),
+    CSR_SHIFT_AND_MASK(FPCR, OVF0, 54, 1),
+    CSR_SHIFT_AND_MASK(FPCR, UNF0, 55, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INE0, 56, 1),
+    CSR_SHIFT_AND_MASK(FPCR, OVI0, 57, 1),
+    CSR_SHIFT_AND_MASK(FPCR, DYN, 58, 2),  /* dynamic rounding mode, decoded in helper_store_fpcr() */
+    CSR_SHIFT_AND_MASK(FPCR, UNDZ, 60, 1),
+    CSR_SHIFT_AND_MASK(FPCR, UNFD, 61, 1),
+    CSR_SHIFT_AND_MASK(FPCR, INED, 62, 1),
+    CSR_SHIFT_AND_MASK(FPCR, SUM, 63, 1),
+};
+
+/* Arithmetic exception (entArith) constants. */
+#define EXC_M_SWC 1 /* Software completion */
+#define EXC_M_INV 2 /* Invalid operation */
+#define EXC_M_DZE 4 /* Division by zero */
+#define EXC_M_OVF 8 /* Overflow */
+#define EXC_M_UNF 16 /* Underflow */
+#define EXC_M_INE 32 /* Inexact result */
+#define EXC_M_IOV 64 /* Integer Overflow */
+#define EXC_M_DNO 128 /* Denormal operation */
+
+void QEMU_NORETURN dynamic_excp(CPUSW64State *env, uintptr_t retaddr, int excp,
+                                int error);
+void QEMU_NORETURN arith_excp(CPUSW64State *env, uintptr_t retaddr, int exc,
+                              uint64_t mask);
+
+#define DEBUG_ARCH
+#ifdef DEBUG_ARCH
+#define arch_assert(x) \
+    do { \
+        g_assert(x); /*fprintf(stderr, "+6b %d\n", __LINE__); */ \
+    } while (0)
+#else
+#define arch_assert(x)
+#endif
+
+/* Model descriptor; presumably consumed by cpu.c when registering types -- confirm. */
+typedef struct SW64CPUInfo {
+    const char *name;
+    void (*initfn)(Object *obj);
+    void (*class_init)(ObjectClass *oc, void *data);
+} SW64CPUInfo;
+/* Test a feature bit installed by set_feature(), e.g. SW64_FEATURE_CORE3. */
+#define test_feature(env, x) (env->features & (x))
+
+/* Slave */
+#endif
diff --git a/target/sw64/exception.c b/target/sw64/exception.c
new file mode 100644
index 0000000000..a2df1cd329
--- /dev/null
+++ b/target/sw64/exception.c
@@ -0,0 +1,76 @@
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+#include "exec/helper-proto.h"
+#include "hw/core/cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * Unaligned-access hook: record the faulting address in the DVA CSR, encode
+ * opcode/destination-register fields into EXC_SUM/DS_STAT, and raise
+ * EXCP_UNALIGN.  Never returns.
+ *
+ * NOTE(review): 'insn' is hard-coded to 0, so the opcode and dest-regno
+ * fields written below are always zero; the faulting instruction word is
+ * never actually fetched -- confirm whether this is intentional.
+ */
+void QEMU_NORETURN sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                                MMUAccessType access_type,
+                                                int mmu_idx, uintptr_t retaddr)
+{
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    uint32_t insn = 0;
+
+    if (retaddr) {
+        cpu_restore_state(cs, retaddr, true); /* resync env state to the faulting insn */
+    }
+
+    fprintf(stderr, "Error %s addr = %lx\n", __func__, addr);
+    env->csr[DVA] = addr;
+
+    env->csr[EXC_SUM] = ((insn >> 21) & 31) << 8; /* opcode */
+    env->csr[DS_STAT] = (insn >> 26) << 4; /* dest regno */
+    cs->exception_index = EXCP_UNALIGN;
+    env->error_code = 0;
+    cpu_loop_exit(cs);
+}
+
+#endif
+
+/* This should only be called from translate, via gen_excp.
+   We expect that ENV->PC has already been updated. */
+void QEMU_NORETURN helper_excp(CPUSW64State *env, int excp, int error)
+{
+    SW64CPU *cpu = sw64_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    env->error_code = error;
+    cpu_loop_exit(cs);
+}
+
+/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
+void QEMU_NORETURN dynamic_excp(CPUSW64State *env, uintptr_t retaddr, int excp,
+                                int error)
+{
+    SW64CPU *cpu = sw64_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    env->error_code = error;
+    if (retaddr) {
+        /* FIXME: Not jump to another tb, but jump to next insn emu */
+        cpu_restore_state(cs, retaddr, true);
+        /* Floating-point exceptions (our only users) point to the next PC. */
+        env->pc += 4;
+    }
+    cpu_loop_exit(cs);
+}
+
+/*
+ * Raise an arithmetic trap with cause bits 'exc' recorded in EXC_SUM.
+ * NOTE(review): 'mask' (the destination-register mask built by callers such
+ * as fp_exc_raise1) is currently ignored -- confirm whether it should be
+ * merged into EXC_SUM or another CSR.
+ */
+void QEMU_NORETURN arith_excp(CPUSW64State *env, uintptr_t retaddr, int exc,
+                              uint64_t mask)
+{
+    env->csr[EXC_SUM] = exc;
+    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
+}
+
+
+/* Memory-trace debug stub: body is commented out, deliberately a no-op. */
+void helper_trace_mem(CPUSW64State *env, uint64_t addr, uint64_t val)
+{
+    /* printf("pc = %lx: Access mem addr =%lx, val = %lx\n", env->pc, addr,val); */
+}
diff --git a/target/sw64/float_helper.c b/target/sw64/float_helper.c
new file mode 100644
index 0000000000..ad1c3cce48
--- /dev/null
+++ b/target/sw64/float_helper.c
@@ -0,0 +1,846 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+/* Returns the fraction bits of the half-precision floating-point value `a'. */
+static inline uint32_t extractFloat16Frac(float16 a)
+{
+    return float16_val(a) & 0x3ff;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the exponent bits of the half-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+
+static inline int extractFloat16Exp(float16 a)
+{
+    return (float16_val(a) >> 10) & 0x1f;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the sign bit of the half-precision floating-point value `a'.
+*----------------------------------------------------------------------------*/
+
+static inline uint8_t extractFloat16Sign(float16 a)
+{
+    return float16_val(a) >> 15;
+}
+
+#define FP_STATUS (env->fp_status)
+
+/* Move the single flag bit SRC of X to bit position DST (both are 1-bit masks). */
+#define CONVERT_BIT(X, SRC, DST) \
+    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X)&SRC) * (DST / SRC))
+
+/* Fetch and clear the accumulated softfloat exception flags (float_flag_*). */
+static uint64_t soft_to_errcode_exc(CPUSW64State *env)
+{
+    uint8_t exc = get_float_exception_flags(&FP_STATUS);
+
+    if (unlikely(exc)) {
+        set_float_exception_flags(0, &FP_STATUS);
+    }
+    return exc;
+}
+
+/*
+ * Convert an IEEE single in 32-bit memory format to the 64-bit in-register
+ * "S" representation: sign/exponent/fraction placed as in a double with the
+ * exponent re-biased to 11 bits and the low 29 fraction bits zero.
+ */
+static inline uint64_t float32_to_s_int(uint32_t fi)
+{
+    uint32_t frac = fi & 0x7fffff;
+    uint32_t sign = (fi >> 31) & 1;
+    uint32_t exp_msb = (fi >> 30) & 1;
+    uint32_t exp_low = (fi >> 23) & 0x7f;
+    uint32_t exp;
+
+    exp = (exp_msb << 10) | exp_low;
+    if (exp_msb) {
+        if (exp_low == 0x7f) {
+            exp = 0x7ff;  /* Inf/NaN: widen exponent to all-ones */
+        }
+    } else {
+        if (exp_low != 0x00) {
+            exp |= 0x380; /* re-bias normal numbers for the 11-bit exponent */
+        }
+    }
+
+    return (((uint64_t)sign << 63) | ((uint64_t)exp << 52) |
+            ((uint64_t)frac << 29));
+}
+
+static inline uint64_t float32_to_s(float32 fa)
+{
+    CPU_FloatU a;
+    a.f = fa;
+    return float32_to_s_int(a.l);
+}
+/* Inverse of float32_to_s_int(): pack register S format back into 32 bits. */
+static inline uint32_t s_to_float32_int(uint64_t a)
+{
+    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+}
+
+static inline float32 s_to_float32(uint64_t a)
+{
+    CPU_FloatU r;
+    r.l = s_to_float32_int(a);
+    return r.f;
+}
+
+/* Register S format -> 32-bit memory format. */
+uint32_t helper_s_to_memory(uint64_t a)
+{
+    return s_to_float32(a);
+}
+
+/* 32-bit memory format -> register S format. */
+uint64_t helper_memory_to_s(uint32_t a)
+{
+    return float32_to_s(a);
+}
+
+/* FCVTLS: signed 64-bit integer -> S float; flags latched in error_code. */
+uint64_t helper_fcvtls(CPUSW64State *env, uint64_t a)
+{
+    float32 fr = int64_to_float32(a, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+    return float32_to_s(fr);
+}
+
+/* FCVTLD: signed 64-bit integer -> D (double) float. */
+uint64_t helper_fcvtld(CPUSW64State *env, uint64_t a)
+{
+    float64 fr = int64_to_float64(a, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+    return (uint64_t)fr;
+}
+
+/*
+ * Convert a float64 bit pattern to a signed 64-bit integer under 'roundmode'
+ * (a float_round_* value), entirely in integer arithmetic.  The softfloat-
+ * style exception flags are left in env->error_code: invalid for Inf/NaN,
+ * inexact whenever bits are dropped.
+ */
+static uint64_t do_fcvtdl(CPUSW64State *env, uint64_t a, uint64_t roundmode)
+{
+    uint64_t frac, ret = 0;
+    uint32_t exp, sign, exc = 0;
+    int shift;
+
+    sign = (a >> 63);
+    exp = (uint32_t)(a >> 52) & 0x7ff;
+    frac = a & 0xfffffffffffffull;
+
+    if (exp == 0) {
+        if (unlikely(frac != 0) && !env->fp_status.flush_inputs_to_zero) {
+            goto do_underflow;  /* denormal input: round via the sticky bit below */
+        }
+    } else if (exp == 0x7ff) {
+        exc = float_flag_invalid;  /* Inf/NaN has no integer value; result stays 0 */
+    } else {
+        /* Restore implicit bit. */
+        frac |= 0x10000000000000ull;
+
+        shift = exp - 1023 - 52;
+        if (shift >= 0) {
+            /* In this case the number is so large that we must shift
+               the fraction left. There is no rounding to do. */
+            if (shift < 64) {
+                ret = frac << shift;
+            }
+            /* Check for overflow. Note the special case of -0x1p63.
+               NOTE(review): overflow is reported as float_flag_inexact here,
+               not float_flag_invalid/overflow -- confirm intended. */
+            if (shift >= 11 && a != 0xC3E0000000000000ull) {
+                exc = float_flag_inexact;
+            }
+        } else {
+            uint64_t round;
+
+            /* In this case the number is smaller than the fraction as
+               represented by the 52 bit number. Here we must think
+               about rounding the result. Handle this by shifting the
+               fractional part of the number into the high bits of ROUND.
+               This will let us efficiently handle round-to-nearest. */
+            shift = -shift;
+            if (shift < 63) {
+                ret = frac >> shift;
+                round = frac << (64 - shift);
+            } else {
+                /* The exponent is so small we shift out everything.
+                   Leave a sticky bit for proper rounding below. */
+            do_underflow:
+                round = 1;
+            }
+
+            if (round) {
+                exc = float_flag_inexact;
+                switch (roundmode) {
+                case float_round_nearest_even:
+                    if (round == (1ull << 63)) {
+                        /* Fraction is exactly 0.5; round to even. */
+                        ret += (ret & 1);
+                    } else if (round > (1ull << 63)) {
+                        ret += 1;
+                    }
+                    break;
+                case float_round_to_zero:
+                    break;
+                case float_round_up:
+                    ret += 1 - sign;  /* toward +inf: bump only positive values */
+                    break;
+                case float_round_down:
+                    ret += sign;      /* toward -inf: bump only negative values */
+                    break;
+                }
+            }
+        }
+        if (sign) {
+            ret = -ret;
+        }
+    }
+    env->error_code = exc;
+
+    return ret;
+}
+
+/* TODO: */
+/* FRIS: round to integral value, result returned as an S float.
+   roundmode == 5 selects the dynamic mode from the FPCR. */
+uint64_t helper_fris(CPUSW64State *env, uint64_t a, uint64_t roundmode)
+{
+    uint64_t ir;
+    float32 fr;
+
+    if (roundmode == 5)
+        roundmode = env->fpcr_round_mode;
+    ir = do_fcvtdl(env, a, roundmode);
+    fr = int64_to_float32(ir, &FP_STATUS);
+    return float32_to_s(fr);
+}
+
+/* TODO: */
+/* FRID: round to integral value, result returned as a D float. */
+uint64_t helper_frid(CPUSW64State *env, uint64_t a, uint64_t roundmode)
+{
+    if (roundmode == 5)
+        roundmode = env->fpcr_round_mode;
+    return int64_to_float64(do_fcvtdl(env, a, roundmode), &FP_STATUS);
+}
+
+/* FCVTDL with an explicit rounding mode from the instruction. */
+uint64_t helper_fcvtdl(CPUSW64State *env, uint64_t a, uint64_t roundmode)
+{
+    return do_fcvtdl(env, a, roundmode);
+}
+
+/* FCVTDL using the dynamic rounding mode from the FPCR. */
+uint64_t helper_fcvtdl_dyn(CPUSW64State *env, uint64_t a)
+{
+    uint64_t roundmode = (uint64_t)(env->fpcr_round_mode);
+    return do_fcvtdl(env, a, roundmode);
+}
+
+/* FCVTSD: S float -> D float (exact widening). */
+uint64_t helper_fcvtsd(CPUSW64State *env, uint64_t a)
+{
+    float32 fa;
+    float64 fr;
+
+    fa = s_to_float32(a);
+    fr = float32_to_float64(fa, &FP_STATUS);
+
+    return fr;
+}
+
+/* FCVTDS: D float -> S float (may round). */
+uint64_t helper_fcvtds(CPUSW64State *env, uint64_t a)
+{
+    float32 fa;
+
+    fa = float64_to_float32((float64)a, &FP_STATUS);
+
+    return float32_to_s(fa);
+}
+
+/* FCVTWL: gather the 32-bit word stored in S-register layout and sign-extend. */
+uint64_t helper_fcvtwl(CPUSW64State *env, uint64_t a)
+{
+    int32_t ret;
+    ret = (a >> 29) & 0x3fffffff;
+    ret |= ((a >> 62) & 0x3) << 30;
+    return (uint64_t)(int64_t)ret; /* int32_t -> int64_t sign-extends */
+}
+
+/* FCVTLW: inverse of FCVTWL -- scatter a 32-bit word into S-register layout. */
+uint64_t helper_fcvtlw(CPUSW64State *env, uint64_t a)
+{
+    uint64_t ret;
+    ret = (a & 0x3fffffff) << 29;
+    ret |= ((a >> 30) & 0x3) << 62;
+    return ret;
+}
+
+/*
+ * Basic S (single, kept in register S format) and D (double) arithmetic.
+ * All '#if 1' bodies use QEMU softfloat and latch the exception flags in
+ * env->error_code; the '#else' branches are dead host-FP variants,
+ * apparently kept for debugging -- confirm before removing.
+ */
+uint64_t helper_fadds(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+#if 1
+    fr = float32_add(fa, fb, &FP_STATUS);
+
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(float*)&fr = *(float*)&fb + *(float*)&fa;
+#endif
+    return float32_to_s(fr);
+}
+
+/* Input handing without software completion. Trap for all
+   non-finite numbers. */
+uint64_t helper_faddd(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = (float64)a;
+    fb = (float64)b;
+#if 1
+    fr = float64_add(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(double*)&fr = *(double*)&fb + *(double*)&fa;
+#endif
+    return (uint64_t)fr;
+}
+
+uint64_t helper_fsubs(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+#if 1
+    fr = float32_sub(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(float*)&fr = *(float*)&fa - *(float*)&fb;
+#endif
+    return float32_to_s(fr);
+}
+
+uint64_t helper_fsubd(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = (float64)a;
+    fb = (float64)b;
+#if 1
+    fr = float64_sub(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(double*)&fr = *(double*)&fa - *(double*)&fb;
+#endif
+    return (uint64_t)fr;
+}
+
+uint64_t helper_fmuls(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+#if 1
+    fr = float32_mul(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(float*)&fr = *(float*)&fa * *(float*)&fb;
+#endif
+    return float32_to_s(fr);
+}
+
+uint64_t helper_fmuld(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = (float64)a;
+    fb = (float64)b;
+#if 1
+    fr = float64_mul(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(double*)&fr = *(double*)&fa * *(double*)&fb;
+#endif
+    return (uint64_t)fr;
+}
+
+uint64_t helper_fdivs(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = s_to_float32(b);
+#if 1
+    fr = float32_div(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(float*)&fr = *(float*)&fa / *(float*)&fb;
+#endif
+    return float32_to_s(fr);
+}
+
+uint64_t helper_fdivd(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb, fr;
+
+    fa = (float64)a;
+    fb = (float64)b;
+#if 1
+    fr = float64_div(fa, fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(double*)&fr = *(double*)&fa / *(double*)&fb;
+#endif
+
+    return (uint64_t)fr;
+}
+
+/* FRECS: S-format reciprocal, computed as 1.0 / a via softfloat division. */
+uint64_t helper_frecs(CPUSW64State *env, uint64_t a)
+{
+    float32 fa, fb, fr;
+
+    fa = s_to_float32(a);
+    fb = int64_to_float32(1, &FP_STATUS);
+#if 1
+    fr = float32_div(fb, fa, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(float*)&fr = *(float*)&fb / *(float*)&fa;
+#endif
+    return float32_to_s(fr);
+}
+
+/* FRECD: D-format reciprocal, 1.0 / a. */
+uint64_t helper_frecd(CPUSW64State *env, uint64_t a)
+{
+    float64 fa, fb, fr;
+
+    fa = (float64)a;
+    fb = int64_to_float64(1, &FP_STATUS);
+#if 1
+    fr = float64_div(fb, fa, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+    *(double*)&fr = *(double*)&fb / *(double*)&fa;
+#endif
+
+    return (uint64_t)fr;
+}
+
+/* FSQRTS: S-format square root (the #else branch is dead debug code). */
+uint64_t helper_fsqrts(CPUSW64State *env, uint64_t b)
+{
+    float32 fb, fr;
+#if 1
+    fb = s_to_float32(b);
+    fr = float32_sqrt(fb, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+#include <math.h>
+    *(double*)&fr = sqrt(*(double*)&b);
+#endif
+
+    return float32_to_s(fr);
+}
+
+/* FSQRTD: D-format square root. */
+uint64_t helper_fsqrt(CPUSW64State *env, uint64_t b)
+{
+    float64 fr;
+
+#if 1
+    fr = float64_sqrt(b, &FP_STATUS);
+    env->error_code = soft_to_errcode_exc(env);
+#else
+#include <math.h>
+    *(double*)&fr = sqrt(*(double*)&b);
+#endif
+
+    return (uint64_t)fr;
+}
+
+
+uint64_t helper_fmas(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float32 fa, fb, fc, fr;
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fc = s_to_float32(c);
+
+ fr = float32_muladd(fa, fb, fc, 0, &FP_STATUS);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_fmad(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float64 fr;
+
+ fr = float64_muladd(a, b, c, 0, &FP_STATUS);
+
+ return fr;
+}
+
+
+uint64_t helper_fmss(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float32 fa, fb, fc, fr;
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fc = s_to_float32(c);
+
+ fr = float32_muladd(fa, fb, fc, float_muladd_negate_c, &FP_STATUS);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_fmsd(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float64 fr;
+
+ fr = float64_muladd(a, b, c, float_muladd_negate_c, &FP_STATUS);
+
+ return fr;
+}
+
+
+uint64_t helper_fnmas(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float32 fa, fb, fc, fr;
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fc = s_to_float32(c);
+ int flag = float_muladd_negate_product;
+
+ fr = float32_muladd(fa, fb, fc, flag, &FP_STATUS);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_fnmad(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float64 fr;
+ int flag = float_muladd_negate_product;
+
+ fr = float64_muladd(a, b, c, flag, &FP_STATUS);
+
+ return fr;
+}
+
+uint64_t helper_fnmss(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float32 fa, fb, fc, fr;
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fc = s_to_float32(c);
+ int flag = float_muladd_negate_product | float_muladd_negate_c;
+
+ fr = float32_muladd(fa, fb, fc, flag, &FP_STATUS);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_fnmsd(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
+{
+ float64 fr;
+ int flag = float_muladd_negate_product | float_muladd_negate_c;
+
+ fr = float64_muladd(a, b, c, flag, &FP_STATUS);
+
+ return fr;
+}
+/* RFPCR: read the FPCR through the canonical accessor in helper.c. */
+uint64_t helper_load_fpcr(CPUSW64State *env)
+{
+    return cpu_sw64_load_fpcr(env);
+}
+
+/*
+ * Recompute env->fpcr_exc_enable (the set of EXC_M_* causes that are
+ * masked/disabled) from the FPCR trap-disable bits.
+ * NOTE(review): an identical static copy exists in helper.c -- consider
+ * sharing one definition.
+ */
+static void update_fpcr_status_mask(CPUSW64State *env)
+{
+    uint64_t t = 0;
+
+    /* Don't mask the inv excp:
+     * EXC_CTL1 = 1
+     * EXC_CTL1 = 0, input denormal, DNZ=0
+     * EXC_CTL1 = 0, no input denormal or DNZ=1, INVD = 0
+     */
+    if ((env->fpcr & FPCR_MASK(EXC_CTL) & 0x2)) {
+        if (env->fpcr & FPCR_MASK(EXC_CTL) & 0x1) {
+            t |= (EXC_M_INE | EXC_M_UNF | EXC_M_IOV);
+        } else {
+            t |= EXC_M_INE;
+        }
+    } else {
+        /* INV and DNO mask */
+        if (env->fpcr & FPCR_MASK(DNZ)) t |= EXC_M_DNO;
+        if (env->fpcr & FPCR_MASK(INVD)) t |= EXC_M_INV;
+        if (env->fpcr & FPCR_MASK(OVFD)) t |= EXC_M_OVF;
+        if (env->fpcr & FPCR_MASK(UNFD)) {
+            t |= EXC_M_UNF;
+        }
+        if (env->fpcr & FPCR_MASK(DZED)) t |= EXC_M_DZE;
+        if (env->fpcr & FPCR_MASK(INED)) t |= EXC_M_INE;
+    }
+
+    env->fpcr_exc_enable = t;
+}
+
+/*
+ * WFPCR: decode the DYN rounding field into a softfloat rounding mode,
+ * update flush-to-zero, then store the FPCR.  The low two bits (EXC_CTL)
+ * are preserved from the old value; they are only writable via
+ * helper_setfpcrx() below.
+ */
+void helper_store_fpcr(CPUSW64State *env, uint64_t val)
+{
+    uint64_t fpcr = val;
+    uint8_t ret;
+
+    switch ((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) {
+    case 0x0:
+        ret = float_round_to_zero;
+        break;
+    case 0x1:
+        ret = float_round_down;
+        break;
+    case 0x2:
+        ret = float_round_nearest_even;
+        break;
+    case 0x3:
+        ret = float_round_up;
+        break;
+    default:
+        ret = float_round_nearest_even;
+        break;
+    }
+
+    env->fpcr_round_mode = ret;
+
+    env->fp_status.float_rounding_mode = ret;
+
+    env->fpcr_flush_to_zero =
+        (fpcr & FPCR_MASK(UNFD)) && (fpcr & FPCR_MASK(UNDZ));
+    env->fp_status.flush_to_zero = env->fpcr_flush_to_zero;
+
+    /* FIXME: Now the DNZ flag does not work in C3A. */
+    //set_flush_inputs_to_zero((val & FPCR_MASK(DNZ)) != 0? 1 : 0, &FP_STATUS);
+
+    val &= ~0x3UL;
+    val |= env->fpcr & 0x3UL;  /* keep the old EXC_CTL bits */
+    env->fpcr = val;
+    update_fpcr_status_mask(env);
+}
+
+/* SETFPEC: write the EXC_CTL bits, permitted only while EXC_CTL_WEN is set. */
+void helper_setfpcrx(CPUSW64State *env, uint64_t val)
+{
+    if (env->fpcr & FPCR_MASK(EXC_CTL_WEN)) {
+        env->fpcr &= ~3UL;
+        env->fpcr |= val & 0x3;
+        update_fpcr_status_mask(env);
+    }
+}
+#ifndef CONFIG_USER_ONLY
+/* Map softfloat float_flag_* bits to the architectural EXC_M_* cause bits. */
+static uint32_t soft_to_exc_type(uint64_t exc)
+{
+    uint32_t ret = 0;
+
+    if (unlikely(exc)) {
+        ret |= CONVERT_BIT(exc, float_flag_invalid, EXC_M_INV);
+        ret |= CONVERT_BIT(exc, float_flag_divbyzero, EXC_M_DZE);
+        ret |= CONVERT_BIT(exc, float_flag_overflow, EXC_M_OVF);
+        ret |= CONVERT_BIT(exc, float_flag_underflow, EXC_M_UNF);
+        ret |= CONVERT_BIT(exc, float_flag_inexact, EXC_M_INE);
+    }
+
+    return ret;
+}
+/* Raise an arithmetic trap tagged with the destination register number. */
+static void fp_exc_raise1(CPUSW64State *env, uintptr_t retaddr, uint64_t exc,
+                          uint32_t regno)
+{
+    if (!likely(exc))
+        return;
+    arith_excp(env, retaddr, exc, 1ull << regno);
+}
+
+/*
+ * Raise a trap for the flags latched in env->error_code by the preceding
+ * FP helper, after filtering out causes masked via fpcr_exc_enable.
+ */
+void helper_fp_exc_raise(CPUSW64State *env, uint32_t regno)
+{
+    uint64_t exc = env->error_code;
+    uint32_t exc_type = soft_to_exc_type(exc);
+
+    if (exc_type) {
+        exc_type &= ~(env->fpcr_exc_enable);
+        if (exc_type) fp_exc_raise1(env, GETPC(), exc_type | EXC_M_SWC, regno);
+    }
+}
+#endif
+
+/* Trap on non-finite D-format input (Inf/NaN) unless INV is masked. */
+void helper_ieee_input(CPUSW64State *env, uint64_t val)
+{
+#ifndef CONFIG_USER_ONLY
+    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+    uint64_t frac = val & 0xfffffffffffffull;
+
+    if (exp == 0x7ff) {
+        /* Infinity or NaN. */
+        uint32_t exc_type = EXC_M_INV;
+
+        if (exc_type) {
+            exc_type &= ~(env->fpcr_exc_enable);
+            if (exc_type)
+                fp_exc_raise1(env, GETPC(), exc_type | EXC_M_SWC, 32);
+        }
+    }
+#endif
+}
+
+/* S-format input check: currently detects denormals but takes no action. */
+void helper_ieee_input_s(CPUSW64State *env, uint64_t val)
+{
+    if (unlikely(2 * val - 1 < 0x1fffffffffffffull) &&
+        !env->fp_status.flush_inputs_to_zero) {
+        /* deliberately empty -- NOTE(review): confirm a trap should not be raised here */
+    }
+}
+
+static inline float64 t_to_float64(uint64_t a)
+{
+    /* Memory format is the same as float64 */
+    CPU_DoubleU r;
+    r.ll = a;
+    return r.d;
+}
+
+/*
+ * D-format comparisons.  Each returns 0x4000000000000000 (the float64
+ * encoding of 2.0) for "true" and 0 for "false"; all use the quiet
+ * (non-signaling) softfloat predicates and latch flags in error_code.
+ */
+uint64_t helper_fcmpun(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+uint64_t helper_fcmpeq(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+uint64_t helper_fcmple(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_le_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+uint64_t helper_fcmplt(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_lt_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+/* a >= b, implemented as b <= a. */
+uint64_t helper_fcmpge(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_le_quiet(fb, fa, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+/* a > b, implemented as b < a. */
+uint64_t helper_fcmpgt(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    fa = t_to_float64(a);
+    fb = t_to_float64(b);
+
+    if (float64_lt_quiet(fb, fa, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+/* S-format >=: operands are widened to float64 before comparing. */
+uint64_t helper_fcmpge_s(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    /* Make sure va and vb is s float. */
+    fa = float32_to_float64(s_to_float32(a), &FP_STATUS);
+    fb = float32_to_float64(s_to_float32(b), &FP_STATUS);
+
+    if (float64_le_quiet(fb, fa, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+/* S-format <=: operands are widened to float64 before comparing. */
+uint64_t helper_fcmple_s(CPUSW64State *env, uint64_t a, uint64_t b)
+{
+    float64 fa, fb;
+    uint64_t ret = 0;
+
+    /* Make sure va and vb is s float. */
+    fa = float32_to_float64(s_to_float32(a), &FP_STATUS);
+    fb = float32_to_float64(s_to_float32(b), &FP_STATUS);
+
+    if (float64_le_quiet(fa, fb, &FP_STATUS)) {
+        ret = 0x4000000000000000ULL;
+    }
+    env->error_code = soft_to_errcode_exc(env);
+
+    return ret;
+}
+
+/*
+ * VFCVTSH: pack the four S-format lanes of vector register 'ra' (lane i
+ * lives at fr[r + i*32]) into four float16 halves of one 64-bit word,
+ * store that word into lane (vc & 3) of 'rd', and copy the remaining
+ * lanes from 'rb'.  ieee=1 half conversions via softfloat.
+ */
+void helper_vfcvtsh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc,
+                    uint64_t rd)
+{
+    uint64_t temp = 0;
+    int i;
+    for (i = 0; i < 4; i++) {
+        temp |= (uint64_t)float32_to_float16(s_to_float32(env->fr[ra + i * 32]),
+                                             1, &FP_STATUS)
+                << (i * 16);
+    }
+    for (i = 0; i < 4; i++) {
+        if (i == (vc & 0x3)) {
+            env->fr[rd + i * 32] = temp;
+        } else {
+            env->fr[rd + i * 32] = env->fr[rb + i * 32];
+        }
+    }
+}
+
+/*
+ * VFCVTHS: inverse of VFCVTSH -- take the 64-bit word in lane (vc & 3) of
+ * 'ra' and expand its four float16 halves into the four S-format lanes of
+ * 'rd'.
+ */
+void helper_vfcvths(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc,
+                    uint64_t rd)
+{
+    uint64_t temp;
+    int i;
+
+    temp = env->fr[ra + 32 * (vc & 0x3)];
+    for (i = 0; i < 4; i++) {
+        env->fr[rd + i * 32] = float32_to_s(
+            float16_to_float32((temp >> (i * 16)) & 0xffffUL, 1, &FP_STATUS));
+    }
+}
diff --git a/target/sw64/helper.c b/target/sw64/helper.c
new file mode 100644
index 0000000000..0cc0af7087
--- /dev/null
+++ b/target/sw64/helper.c
@@ -0,0 +1,349 @@
+/*
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+#include "exec/helper-proto.h"
+#include "hw/core/cpu.h"
+#include "exec/memattrs.h"
+
+#if defined(CONFIG_USER_ONLY)
+/* user-mode: every fault is reported to cpu_loop() as EXCP_MMFAULT. */
+bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr)
+{
+    SW64CPU *cpu = SW64_CPU(cs);
+
+    cs->exception_index = EXCP_MMFAULT;
+    cpu->env.trap_arg0 = address;   /* faulting address for the signal path */
+    cpu_loop_exit_restore(cs, retaddr);
+}
+#else
+/* Load a PTE from physical memory, ignoring the low two address bits. */
+static target_ulong ldq_phys_clear(CPUState *cs, target_ulong phys)
+{
+    return ldq_phys(cs->as, phys & ~(3UL));
+}
+
+/*
+ * Software page-table walk.  Returns -1 on success (with *pphys/*pprot
+ * filled in) or an MM_K_* fault code on failure.  'mmu_idx' is unused.
+ *
+ * 'prot_need' is the required access as a PAGE_* bit mask at the final
+ * level.  NOTE(review): the intermediate-level FOR/FOW checks compare
+ * prot_need against 0 and 1, which does not match the PAGE_* encoding
+ * (PAGE_WRITE == 2), so those checks can never flag a write -- confirm
+ * the intended prot_need encoding.
+ */
+static int get_sw64_physical_address(CPUSW64State *env, target_ulong addr,
+                                     int prot_need, int mmu_idx, target_ulong *pphys,
+                                     int *pprot)
+{
+    CPUState *cs = CPU(sw64_env_get_cpu(env));
+    target_ulong phys = 0;
+    int prot = 0;
+    int ret = MM_K_ACV;
+    target_ulong L1pte, L2pte, L3pte, L4pte;
+    target_ulong pt, index, pte_pfn_s;
+
+    /* Direct-mapped windows: high address ranges translate 1:1 to RAM. */
+    if (((addr >> 28) & 0xffffffff8) == 0xffffffff8) {
+        phys = (~(0xffffffff80000000)) & addr;
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        ret = -1;
+        goto exit;
+    } else if (((addr >> 32) & 0xfffff000) == 0xfffff000) {
+        goto do_pgmiss;
+    } else if (((addr >> 52) & 0xfff) == 0xfff) {
+        phys = (~(0xfff0000000000000)) & addr;
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        ret = -1;
+        goto exit;
+    }
+do_pgmiss:
+    /* Four-level walk rooted at the PTBR CSR; PFN starts at bit 28 of a PTE. */
+    pte_pfn_s = 28;
+    pt = env->csr[PTBR];
+    index = (addr >> (TARGET_PAGE_BITS + 3 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1);
+    L1pte = ldq_phys_clear(cs, pt + index * 8);
+    if ((L1pte & PTE_VALID) == 0) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+    if (((L1pte >> 1) & 1) && prot_need == 0) {  /* PTE_FOR bit */
+        ret = MM_K_FOR;
+        goto exit;
+    }
+    if (((L1pte >> 2) & 1) && prot_need == 1) {  /* PTE_FOW bit */
+        ret = MM_K_FOW;
+        goto exit;
+    }
+    pt = L1pte >> pte_pfn_s << TARGET_PAGE_BITS;
+
+    index = (addr >> (TARGET_PAGE_BITS + 2 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1);
+    L2pte = ldq_phys_clear(cs, pt + index * 8);
+
+    if ((L2pte & PTE_VALID) == 0) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+    if (((L2pte >> 1) & 1) && prot_need == 0) {
+        ret = MM_K_FOR;
+        goto exit;
+    }
+    if (((L2pte >> 2) & 1) && prot_need == 1) {
+        ret = MM_K_FOW;
+        goto exit;
+    }
+
+    pt = L2pte >> pte_pfn_s << TARGET_PAGE_BITS;
+
+    index = (addr >> (TARGET_PAGE_BITS + 1 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1);
+    L3pte = ldq_phys_clear(cs, pt + index * 8);
+
+    if ((L3pte & PTE_VALID) == 0) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+    if (((L3pte >> 1) & 1) && prot_need == 0) {
+        ret = MM_K_FOR;
+        goto exit;
+    }
+    if (((L3pte >> 2) & 1) && prot_need == 1) {
+        ret = MM_K_FOW;
+        goto exit;
+    }
+
+    pt = L3pte >> pte_pfn_s << TARGET_PAGE_BITS;
+
+    index = (addr >> TARGET_PAGE_BITS) & ((1 << TARGET_LEVEL_BITS)-1);
+    L4pte = ldq_phys_clear(cs, pt + index * 8);
+    if ((L4pte & PTE_VALID) == 0) {
+        ret = MM_K_TNV;
+        goto exit;
+    }
+#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
+#error page bits out of date
+#endif
+
+    /* Check access violations. */
+    if ((L4pte & PTE_FOR) == 0) {
+        prot |= PAGE_READ | PAGE_EXEC;
+    }
+    if ((L4pte & PTE_FOW) == 0) {
+        prot |= PAGE_WRITE;
+    }
+
+    /* Check fault-on-operation violations. */
+    prot &= ~(L4pte >> 1);
+
+    phys = (L4pte >> pte_pfn_s << TARGET_PAGE_BITS);
+
+    if (unlikely((prot & prot_need) == 0)) {
+        ret = (prot_need & PAGE_EXEC
+                   ? MM_K_FOE
+                   : prot_need & PAGE_WRITE
+                         ? MM_K_FOW
+                         : prot_need & PAGE_READ ? MM_K_FOR : -1);
+        goto exit;
+    }
+
+    ret = -1;
+exit:
+    *pphys = phys;
+    *pprot = prot;
+    return ret;
+}
+
+bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr)
+{  /* Fill the softmmu TLB for 'address'; raises EXCP_MMFAULT on a translation fault. */
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    target_ulong phys;
+    int prot, fail;
+
+    if (mmu_idx == MMU_PHYS_IDX) {
+        phys = address;
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        fail = 0;
+        if ((address >> 52) & 1) goto do_pgmiss;  /* bit 52 set: not a flat physical address, fall back to a page-table walk */
+        goto done;
+    }
+
+do_pgmiss:
+    fail = get_sw64_physical_address(env, address, 1 << access_type, mmu_idx, &phys, &prot);  /* 1 << access_type: LOAD/STORE/FETCH -> PAGE_READ/WRITE/EXEC */
+    if (unlikely(fail >= 0)) {  /* fail >= 0 is an MM_K_* fault code; -1 means success */
+        if (probe) {
+            return false;
+        }
+        cs->exception_index = EXCP_MMFAULT;
+        if (access_type == 2) {  /* instruction fetch: DVA holds the aligned fetch address */
+            env->csr[DS_STAT] = fail;
+            env->csr[DVA] = address & ~(3UL);
+        } else {
+            env->csr[DS_STAT] = fail | (((unsigned long)access_type + 1) << 3);  /* encode the access cause alongside the fault code */
+            env->csr[DVA] = address;
+        }
+        env->error_code = access_type;
+        cpu_loop_exit_restore(cs, retaddr);
+    }
+done:
+    tlb_set_page(cs, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, prot,
+                 mmu_idx, TARGET_PAGE_SIZE);
+    return true;
+}
+
+hwaddr sw64_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{  /* Debugger (gdbstub/monitor) translation: never raises faults, returns -1 on failure. */
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    target_ulong phys;
+    int prot, fail;
+    int mmu_index = cpu_mmu_index(env, 0);
+    if (mmu_index == MMU_PHYS_IDX) {
+        phys = addr;
+        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        fail = -1;  /* -1 == success in this helper's convention */
+        if ((addr >> 52) & 1) goto do_pgmiss;  /* bit 52 set: still needs a page-table walk */
+        goto done;
+    }
+do_pgmiss:
+    fail = get_sw64_physical_address(&cpu->env, addr, 1, mmu_index, &phys, &prot);  /* probe as a read (prot_need = PAGE_READ) */
+done:
+    return (fail >= 0 ? -1 : phys);  /* fail >= 0 is a fault code: no valid translation */
+}
+#endif
+
+static void update_fpcr_status_mask(CPUSW64State* env) {  /* Recompute the set of masked (suppressed) FP exceptions from FPCR bits. */
+    uint64_t t = 0;
+
+    /* Don't mask the invalid exception when:
+     *   EXC_CTL bit 1 = 1, or
+     *   EXC_CTL = 0 with an input denormal and DNZ=0, or
+     *   EXC_CTL = 0, no input denormal (or DNZ=1), and INVD = 0.
+     */
+    if ((env->fpcr & FPCR_MASK(EXC_CTL) & 0x2)) {  /* EXC_CTL bit 1 set: fixed mask set */
+        if (env->fpcr & FPCR_MASK(EXC_CTL) & 0x1) {
+            t |= (EXC_M_INE | EXC_M_UNF | EXC_M_IOV);
+        } else {
+            t |= EXC_M_INE;
+        }
+    } else {
+        /* Per-exception disable bits from the FPCR. */
+        if (env->fpcr & FPCR_MASK(DNZ)) t |= EXC_M_DNO;
+        if (env->fpcr & FPCR_MASK(INVD)) t |= EXC_M_INV;
+        if (env->fpcr & FPCR_MASK(OVFD)) t |= EXC_M_OVF;
+        if (env->fpcr & FPCR_MASK(UNFD)) {
+            t |= EXC_M_UNF;
+        }
+        if (env->fpcr & FPCR_MASK(DZED)) t |= EXC_M_DZE;
+        if (env->fpcr & FPCR_MASK(INED)) t |= EXC_M_INE;
+    }
+
+    env->fpcr_exc_enable = t;  /* cached so FP helpers need not re-decode the FPCR */
+}
+
+void cpu_sw64_store_fpcr(CPUSW64State* env, uint64_t val) {  /* Install a new FPCR value and refresh the cached softfloat state. */
+    uint64_t fpcr = val;
+    uint8_t ret;
+
+    switch ((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) {  /* DYN field selects the dynamic rounding mode */
+    case 0x0:
+        ret = float_round_to_zero;
+        break;
+    case 0x1:
+        ret = float_round_down;
+        break;
+    case 0x2:
+        ret = float_round_nearest_even;
+        break;
+    case 0x3:
+        ret = float_round_up;
+        break;
+    default:
+        ret = float_round_nearest_even;  /* unreachable for a 2-bit field, kept defensively */
+        break;
+    }
+
+    env->fpcr_round_mode = ret;
+    env->fp_status.float_rounding_mode = ret;
+
+    env->fpcr_flush_to_zero =
+        (fpcr & FPCR_MASK(UNFD)) && (fpcr & FPCR_MASK(UNDZ));  /* flush results to zero only when both UNFD and UNDZ are set */
+    env->fp_status.flush_to_zero = env->fpcr_flush_to_zero;
+
+    val &= ~0x3UL;
+    val |= env->fpcr & 0x3UL;  /* preserve the old FPCR's low two bits — presumably EXC_CTL; TODO confirm against the FPCR layout */
+    env->fpcr = val;
+    update_fpcr_status_mask(env);
+}
+
+uint64_t helper_read_csr(CPUSW64State *env, uint64_t index)
+{  /* Read a CSR; PRI_BASE reads back a fixed value rather than stored state. */
+    if (index == PRI_BASE)
+        return 0x10000;
+    return env->csr[index];
+}
+
+uint64_t helper_rtc(void)
+{  /* RTC instruction: scale virtual-clock nanoseconds by CPUFREQ_SCALE to get tick counts. */
+#ifndef CONFIG_USER_ONLY
+    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) * CPUFREQ_SCALE;
+#else
+    return 0;  /* no virtual clock in user-mode emulation */
+#endif
+}
+
+void helper_write_csr(CPUSW64State *env, uint64_t index, uint64_t va)
+{  /* Write a CSR and emulate the side effects of the TLB, interrupt and timer registers. */
+    env->csr[index] = va;
+#ifndef CONFIG_USER_ONLY
+    CPUState *cs = &(sw64_env_get_cpu(env)->parent_obj);
+    SW64CPU *cpu = SW64_CPU(cs);
+    if ((index == DTB_IA) || (index == DTB_IV) || (index == DTB_IVP) ||
+        (index == DTB_IU) || (index == DTB_IS) || (index == ITB_IA) ||
+        (index == ITB_IV) || (index == ITB_IVP) || (index == ITB_IU) ||
+        (index == ITB_IS) || (index == PTBR)) {  /* TLB-invalidate registers and the page-table base: drop the softmmu TLB */
+        tlb_flush(cs);
+    }
+    if (index == INT_CLR || index == INT_PCI_INT) {
+        env->csr[INT_STAT] &= ~va;  /* write-1-to-clear pending interrupt bits */
+    }
+
+    if (index == TIMER_CTL && env->csr[index] == 1) {  /* tests the value just written above */
+        timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000000 / 250);  /* arm the next tick, 250 Hz */
+    }
+#endif
+}
+
+uint64_t cpu_sw64_load_fpcr(CPUSW64State *env)
+{  /* Read back the raw architectural FPCR value. */
+    return (uint64_t)env->fpcr;
+}
+
+void helper_tb_flush(CPUSW64State *env)
+{  /* Discard every translated block for this CPU. */
+    tb_flush(CPU(sw64_env_get_cpu(env)));
+}
+
+void helper_cpustate_update(CPUSW64State *env, uint64_t pc)
+{  /* The low two PC bits encode the privilege mode of the target context. */
+    switch (pc & 0x3) {
+    case 0x00:
+        env->flags = ENV_FLAG_HM_MODE;  /* HMcode (hypervisor) mode */
+        break;
+    case 0x01:
+        env->flags &= ~(ENV_FLAG_PS_USER | ENV_FLAG_HM_MODE);
+        break;
+    case 0x02:
+        env->flags &= ~(ENV_FLAG_PS_USER | ENV_FLAG_HM_MODE);  /* same effect as 0x01 — two non-user, non-HM modes collapse here */
+        break;
+    case 0x03:
+        env->flags = ENV_FLAG_PS_USER;  /* user mode; last case, no break needed */
+    }
+}
diff --git a/target/sw64/helper.h b/target/sw64/helper.h
new file mode 100644
index 0000000000..7cafa563c2
--- /dev/null
+++ b/target/sw64/helper.h
@@ -0,0 +1,127 @@
+
+DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(cmpgeb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_2(fcvtls, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvtld, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(fcvtdl, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvtdl_dyn, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(fris, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(frid, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvtsd, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvtds, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvtwl, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvtlw, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_5(vfcvtsh, 0, void, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_5(vfcvths, 0, void, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(frecs, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(frecd, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fsqrt, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_4(fmas, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmad, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmss, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmsd, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmas, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmad, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmss, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmsd, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_0(rtc, TCG_CALL_NO_RWG, i64)
+DEF_HELPER_FLAGS_1(load_fpcr, 0, i64, env)
+DEF_HELPER_FLAGS_2(store_fpcr, 0, void, env, i64)
+DEF_HELPER_FLAGS_2(setfpcrx, 0, void, env, i64)
+DEF_HELPER_FLAGS_2(ieee_input, 0, void, env, i64)
+DEF_HELPER_FLAGS_2(ieee_input_s, 0, void, env, i64)
+DEF_HELPER_FLAGS_2(read_csr, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(write_csr, 0, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(cpustate_update, 0, void, env, i64)
+DEF_HELPER_FLAGS_3(trace_mem, 0, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmpun, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmple, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmplt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmpge, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmpgt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmpge_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fcmple_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_4(srlow, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(sllow, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vlogzz, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vconw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vcond, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vshfw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_2(ctlzow, 0, i64, env, i64)
+DEF_HELPER_FLAGS_4(vucaddw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucaddwi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucsubw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucsubwi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucaddh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucaddhi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucsubh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucsubhi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucaddb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucaddbi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucsubb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vucsubbi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_3(vstw, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(vsts, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(vstd, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(v_print, 0, void, env, i64)
+DEF_HELPER_FLAGS_1(tb_flush, 0, void, env)
+DEF_HELPER_FLAGS_4(vmaxb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vminb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vmaxh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vminh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vmaxw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vminw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(sraow, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vsm4r, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vsm4key, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vsm3msw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vcmpueqb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vcmpugtb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vcmpueqbi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vcmpugtbi, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vumaxb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vuminb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vumaxh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vuminh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vumaxw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vuminw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_5(vinsb, 0, void, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_5(vinsh, 0, void, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vinsectlh, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vinsectlw, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vinsectlb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_5(vshfq, 0, void, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_4(vshfqb, 0, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_5(vsm3r, 0, void, env, i64, i64, i64, i64)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_2(fp_exc_raise, 0, void, env, i32)
+DEF_HELPER_FLAGS_2(pri_ldw, 0, i64, env, i64)
+DEF_HELPER_FLAGS_3(pri_stw, 0, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(pri_ldl, 0, i64, env, i64)
+DEF_HELPER_FLAGS_3(pri_stl, 0, void, env, i64, i64)
+#endif
+
+DEF_HELPER_3(excp, noreturn, env, int, int)
+//DEF_HELPER_FLAGS_3(faddh, TCG_CALL_NO_RWG, i64, env, i64, i64)
+//DEF_HELPER_FLAGS_3(fsubh, TCG_CALL_NO_RWG, i64, env, i64, i64)
+//DEF_HELPER_FLAGS_3(fmulh, TCG_CALL_NO_RWG, i64, env, i64, i64)
+#ifndef CONFIG_USER_ONLY
+/* Scale factor for core3 cpu freq, ie number of ns per tick. */
+#define CPUFREQ_SCALE 3
+#endif
+
+/* SLAVE FLOAT HELPER. */
diff --git a/target/sw64/int_helper.c b/target/sw64/int_helper.c
new file mode 100644
index 0000000000..131182585a
--- /dev/null
+++ b/target/sw64/int_helper.c
@@ -0,0 +1,118 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/memattrs.h"
+
+uint64_t helper_zapnot(uint64_t val, uint64_t mskb)
+{  /* Keep byte lane i of val iff bit i of mskb is set; other bytes become zero. */
+    uint64_t mask;
+
+    mask = -(mskb & 0x01) & 0x00000000000000ffull;  /* -(2^k) sets every bit >= k, so ANDing selects the whole byte lane */
+    mask |= -(mskb & 0x02) & 0x000000000000ff00ull;
+    mask |= -(mskb & 0x04) & 0x0000000000ff0000ull;
+    mask |= -(mskb & 0x08) & 0x00000000ff000000ull;
+    mask |= -(mskb & 0x10) & 0x000000ff00000000ull;
+    mask |= -(mskb & 0x20) & 0x0000ff0000000000ull;
+    mask |= -(mskb & 0x40) & 0x00ff000000000000ull;
+    mask |= -(mskb & 0x80) & 0xff00000000000000ull;
+
+    return val & mask;
+}
+
+uint64_t helper_zap(uint64_t val, uint64_t mask)
+{  /* ZAP clears the selected bytes: complement the mask and reuse zapnot. */
+    return helper_zapnot(val, ~mask);
+}
+
+uint64_t helper_cmpgeb(uint64_t va, uint64_t vb)
+{  /* Per-byte unsigned >= compare: bit i/8 of the result is set iff byte i of va >= byte i of vb. */
+    int i;
+    uint64_t ret = 0;
+    uint64_t tmp;
+    for (i = 0; i < 64; i += 8) {
+        tmp = ((va >> i) & 0xff) + (~(vb >> i) & 0xff) + 1;  /* a + ~b + 1: bit 8 is the carry of a - b, i.e. a >= b unsigned */
+        ret |= (tmp >> 8) << (i / 8);
+    }
+    return ret;
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline MemTxAttrs cpu_get_mem_attrs(CPUSW64State *env)
+{  /* All physical accesses from these helpers use the secure attribute. */
+    return ((MemTxAttrs) { .secure = 1 });
+}
+
+static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
+{  /* Resolve the CPU address space matching the given transaction attributes. */
+    return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
+}
+
+uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr)
+{  /* 32-bit physical-memory load, sign-extended to 64 bits. */
+    SW64CPU *cpu = SW64_CPU(cs);
+    int32_t ret;
+    CPUSW64State *env = &cpu->env;
+    MemTxAttrs attrs = cpu_get_mem_attrs(env);
+    AddressSpace *as = cpu_addressspace(cs, attrs);
+
+    ret = (int32_t)address_space_ldl(as, addr, attrs, NULL);
+
+    return (uint64_t)(int64_t)ret;  /* sign-extend via int64_t */
+}
+
+void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val)
+{  /* 32-bit physical-memory store (low 32 bits of val). */
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    MemTxAttrs attrs = cpu_get_mem_attrs(env);
+    AddressSpace *as = cpu_addressspace(cs, attrs);
+
+    address_space_stl(as, addr, (uint32_t)val, attrs, NULL);
+}
+
+uint64_t sw64_ldl_phys(CPUState *cs, hwaddr addr)
+{  /* 64-bit physical-memory load. */
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    MemTxAttrs attrs = cpu_get_mem_attrs(env);
+    AddressSpace *as = cpu_addressspace(cs, attrs);
+
+    return address_space_ldq(as, addr, attrs, NULL);
+}
+
+void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val)
+{  /* 64-bit physical-memory store. */
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    MemTxAttrs attrs = cpu_get_mem_attrs(env);
+    AddressSpace *as = cpu_addressspace(cs, attrs);
+
+    address_space_stq(as, addr, val, attrs, NULL);
+}
+
+uint64_t helper_pri_ldw(CPUSW64State *env, uint64_t hwaddr)
+{  /* Privileged 32-bit physical load; NOTE(review): parameter name shadows the 'hwaddr' typedef. */
+    CPUState *cs = CPU(sw64_env_get_cpu(env));
+    return sw64_ldw_phys(cs, hwaddr);
+}
+
+void helper_pri_stw(CPUSW64State *env, uint64_t val, uint64_t hwaddr)
+{  /* Privileged 32-bit physical store; NOTE(review): parameter name shadows the 'hwaddr' typedef. */
+    CPUState *cs = CPU(sw64_env_get_cpu(env));
+    sw64_stw_phys(cs, hwaddr, val);
+}
+
+uint64_t helper_pri_ldl(CPUSW64State *env, uint64_t hwaddr)
+{  /* Privileged 64-bit physical load; NOTE(review): parameter name shadows the 'hwaddr' typedef. */
+    CPUState *cs = CPU(sw64_env_get_cpu(env));
+    return sw64_ldl_phys(cs, hwaddr);
+}
+
+void helper_pri_stl(CPUSW64State *env, uint64_t val, uint64_t hwaddr)
+{  /* Privileged 64-bit physical store; NOTE(review): parameter name shadows the 'hwaddr' typedef. */
+    CPUState *cs = CPU(sw64_env_get_cpu(env));
+    sw64_stl_phys(cs, hwaddr, val);
+}
+#endif
diff --git a/target/sw64/kvm.c b/target/sw64/kvm.c
new file mode 100644
index 0000000000..fc134c83fb
--- /dev/null
+++ b/target/sw64/kvm.c
@@ -0,0 +1,215 @@
+/*
+ * SW64 implementation of KVM hooks
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_sw64.h"
+#include "cpu.h"
+#include "exec/memattrs.h"
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+
+#define init_pc 0xffffffff80011000
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+/* 50000 jumps to the bootloader while 2f00000 jumps to the BIOS */
+int kvm_sw64_vcpu_init(CPUState *cs)
+{  /* Point the vcpu's PC at the firmware entry and push it to KVM. */
+    struct kvm_regs *regs;
+    SW64CPU *cpu = SW64_CPU(cs);
+    regs = (struct kvm_regs *)cpu->k_regs;
+    regs->pc = init_pc;
+    return kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);  /* was &regs: that passed the address of the local pointer, not the register block */
+}
+
+static void kvm_sw64_host_cpu_class_init(ObjectClass *oc, void *data)
+{  /* No class-level setup required for the host cpu model yet. */
+}
+
+static void kvm_sw64_host_cpu_initfn(Object *obj)
+{  /* No per-instance setup required for the host cpu model yet. */
+}
+
+
+static const TypeInfo host_sw64_cpu_type_info = {  /* QOM type for the "host" cpu model used under KVM */
+    .name = TYPE_SW64_HOST_CPU,
+    .parent = TYPE_SW64_CPU,
+    .instance_init = kvm_sw64_host_cpu_initfn,
+    .class_init = kvm_sw64_host_cpu_class_init,
+    .class_size = sizeof(SW64HostCPUClass),
+};
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{  /* Arch-level KVM init: enable async interrupt injection, register the host cpu type. */
+    kvm_async_interrupts_allowed = true;
+
+    type_register_static(&host_sw64_cpu_type_info);
+
+    return 0;
+}
+
+/* 50000 jumps to the bootloader while 2f00000 jumps to the BIOS */
+void kvm_sw64_reset_vcpu(SW64CPU *cpu)
+{  /* Reset: reload the firmware entry PC into KVM, then reinit the vcpu state. */
+    CPUState *cs = CPU(cpu);
+    struct kvm_regs *regs;
+    int ret;
+
+    regs = (struct kvm_regs *)cpu->k_regs;
+    regs->pc = init_pc;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);  /* was &regs: address of the local pointer, not the register block */
+
+    if (ret < 0) {
+        fprintf(stderr, "kvm_sw64_reset_vcpu: KVM_SET_REGS failed: %s\n", strerror(-ret));
+        abort();
+    }
+
+    ret = kvm_vcpu_ioctl(cs, KVM_SW64_VCPU_INIT, NULL);
+
+    if (ret < 0) {
+        fprintf(stderr, "kvm_sw64_reset_vcpu: KVM_SW64_VCPU_INIT failed: %s\n", strerror(-ret));
+        abort();
+    }
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{  /* The KVM vcpu id is simply QEMU's cpu index. */
+    return cpu->cpu_index;
+}
+
+#include <pthread.h>
+int kvm_arch_init_vcpu(CPUState *cs)
+{  /* Per-vcpu init hook: delegate to kvm_sw64_vcpu_init (sets the initial PC). */
+    int ret;
+    ret = kvm_sw64_vcpu_init(cs);
+    if (ret) {
+        return ret;
+    }
+    return 0;
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{  /* Nothing arch-specific to tear down. */
+    return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{  /* Pull state from KVM: general registers, then the vcpu control block. */
+    int ret;
+    SW64CPU *cpu = SW64_CPU(cs);
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &cpu->k_regs);
+    if (ret < 0)
+        return ret;
+    return kvm_vcpu_ioctl(cs, KVM_SW64_GET_VCB, &cpu->k_vcb);
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{  /* Push state to KVM: general registers, then the vcpu control block. */
+    int ret;
+    SW64CPU *cpu = SW64_CPU(cs);
+    struct vcpucb *vcb;
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &cpu->k_regs);
+    if (ret < 0)
+        return ret;
+    vcb = (struct vcpucb *)cpu->k_vcb;
+    vcb->whami = kvm_arch_vcpu_id(cs);  /* publish this vcpu's id in its control block */
+    fprintf(stderr,"vcpu %ld init.\n", vcb->whami);  /* NOTE(review): debug print fires on every register sync, not only at init — consider removing */
+    return kvm_vcpu_ioctl(cs, KVM_SW64_SET_VCB, &cpu->k_vcb);
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+                                int vector, PCIDevice *dev)
+{  /* No arch-specific MSI route post-processing implemented. */
+    return -1;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+                             uint64_t address, uint32_t data, PCIDevice *dev)
+{  /* MSI routes need no arch-specific fixup. */
+    return 0;
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{  /* Nothing to do before entering the guest. */
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{  /* No per-exit memory attributes to report. */
+    return MEMTXATTRS_UNSPECIFIED;
+}
+
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{  /* No arch-specific exit reasons handled; -1 lets generic code report the exit as an error. */
+    return -1;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{  /* Always stop the vcpu on an in-kernel emulation failure. */
+    return true;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{  /* No arch-specific async events; never request a vcpu stop here. */
+    return 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{  /* Guest debugging is not supported on this architecture yet. */
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+    /* We know at this point that we're using the in-kernel
+     * irqchip, so we can use irqfds, and MSI via irqfd plus
+     * GSI routing are available.
+     */
+    kvm_msi_via_irqfd_allowed = true;
+    kvm_gsi_routing_allowed = true;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{  /* Let the generic code create the in-kernel irqchip. */
+    return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{  /* No arch-specific virq release handling. */
+    return -1;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{  /* MSI data does not map directly to a GSI on this architecture. */
+    return -1;
+}
+
+
+void kvm_sw64_register_slave(SW64CPU *cpu)
+{  /* Tell KVM this vcpu should use the slave path (KVM_SW64_USE_SLAVE); return value ignored. */
+    CPUState *cs = CPU(cpu);
+
+    kvm_vcpu_ioctl(cs, KVM_SW64_USE_SLAVE, NULL);
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{  /* vcpus can always be reset under KVM on this architecture. */
+    return true;
+}
diff --git a/target/sw64/kvm_sw64.h b/target/sw64/kvm_sw64.h
new file mode 100644
index 0000000000..5ebd4ec6fd
--- /dev/null
+++ b/target/sw64/kvm_sw64.h
@@ -0,0 +1,47 @@
+/*
+ * QEMU KVM support -- SW64 specific functions.
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_SW64_H
+#define QEMU_KVM_SW64_H
+
+#include "sysemu/kvm.h"
+#include "exec/memory.h"
+#include "qemu/error-report.h"
+
+/**
+ * kvm_sw64_vcpu_init:
+ * @cs: CPUState
+ *
+ * Initialize (or reinitialize) the VCPU by invoking the
+ * KVM_SW64_VCPU_INIT ioctl with the CPU type and feature
+ * bitmask specified in the CPUState.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+int kvm_sw64_vcpu_init(CPUState *cs);
+void kvm_sw64_reset_vcpu(SW64CPU *cpu);
+void kvm_sw64_register_slave(SW64CPU *cpu);
+
+#define TYPE_SW64_HOST_CPU "host-" TYPE_SW64_CPU
+#define SW64_HOST_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SW64HostCPUClass, (klass), TYPE_SW64_HOST_CPU)
+#define SW64_HOST_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SW64HostCPUClass, (obj), TYPE_SW64_HOST_CPU)
+
+typedef struct SW64HostCPUClass {
+ /*< private >*/
+ SW64CPUClass parent_class;
+ /*< public >*/
+
+ uint64_t features;
+ uint32_t target;
+ const char *dtb_compatible;
+} SW64HostCPUClass;
+#endif
diff --git a/target/sw64/machine.c b/target/sw64/machine.c
new file mode 100644
index 0000000000..df18d3faba
--- /dev/null
+++ b/target/sw64/machine.c
@@ -0,0 +1,18 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "migration/vmstate.h"
+#include "migration/cpu.h"
+
+VMStateDescription vmstate_sw64_cpu = {  /* migration state: raw KVM register file and vcpu control block */
+    .name = "cpu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+#ifdef CONFIG_KVM
+        VMSTATE_UINTTL_ARRAY(k_regs, SW64CPU, 158),  /* KVM general-register buffer */
+        VMSTATE_UINTTL_ARRAY(k_vcb, SW64CPU, 36),    /* vcpucb words */
+#endif
+        VMSTATE_END_OF_LIST()
+    }
+};
diff --git a/target/sw64/meson.build b/target/sw64/meson.build
new file mode 100644
index 0000000000..ee49e45927
--- /dev/null
+++ b/target/sw64/meson.build
@@ -0,0 +1,19 @@
+sw64_ss = ss.source_set()
+sw64_ss.add(files(
+ 'cpu.c',
+ 'exception.c',
+ 'float_helper.c',
+ 'helper.c',
+ 'int_helper.c',
+ 'profile.c',
+ 'simd_helper.c',
+ 'translate.c',
+))
+
+sw64_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
+
+sw64_softmmu_ss = ss.source_set()
+sw64_softmmu_ss.add(files('machine.c'))
+
+target_arch += {'sw64': sw64_ss}
+target_softmmu_arch += {'sw64': sw64_softmmu_ss}
diff --git a/target/sw64/profile.c b/target/sw64/profile.c
new file mode 100644
index 0000000000..73fe077234
--- /dev/null
+++ b/target/sw64/profile.c
@@ -0,0 +1,2342 @@
+#include "translate.h"
+
+const char *insn_opc[535] = {
+ "sys_call", "call", "ret", "jmp", "br", "bsr", "memb", "imemb",
+ "wmemb", "rtc", "rcid", "halt", "rd_f", "wr_f", "rtid",
+ "csrws", "csrwc", "pri_rcsr", "pri_wcsr", "pri_ret", "lldw", "lldl",
+ "ldw_inc", "ldl_inc", "ldw_dec", "ldl_dec", "ldw_set", "ldl_set", "lstw",
+ "lstl", "ldw_nc", "ldl_nc", "ldd_nc", "stw_nc", "stl_nc", "std_nc",
+ "ldwe", "ldse", "ldde", "vlds", "vldd", "vsts", "vstd",
+ "fimovs", "fimovd", "addw", "subw", "s4addw", "s4subw", "s8addw",
+ "s8subw", "addl", "subl", "s4addl", "s4subl", "s8addl", "s8subl",
+ "mulw", "divw", "udivw", "remw", "uremw", "mull", "mulh",
+ "divl", "udivl", "reml", "ureml", "addpi", "addpis", "cmpeq",
+ "cmplt", "cmple", "cmpult", "cmpule", "sbt", "cbt", "and",
+ "bic", "bis", "ornot", "xor", "eqv", "inslb", "inslh",
+ "inslw", "insll", "inshb", "inshh", "inshw", "inshl", "slll",
+ "srll", "sral", "roll", "sllw", "srlw", "sraw", "rolw",
+ "extlb", "extlh", "extlw", "extll", "exthb", "exthh", "exthw",
+ "exthl", "ctpop", "ctlz", "cttz", "revbh", "revbw", "revbl",
+ "casw", "casl", "masklb", "masklh", "masklw", "maskll", "maskhb",
+ "maskhh", "maskhw", "maskhl", "zap", "zapnot", "sextb", "sexth",
+ "seleq", "selge", "selgt", "selle", "sellt", "selne", "sellbc",
+ "sellbs", "addwi", "subwi", "s4addwi", "s4subwi", "s8addwi", "s8subwi",
+ "addli", "subli", "s4addli", "s4subli", "s8addli", "s8subli", "mulwi",
+ "divwi", "udivwi", "remwi", "uremwi", "mulli", "mulhi", "divli",
+ "udivli", "remli", "uremli", "addpii", "addpisi", "cmpeqi", "cmplti",
+ "cmplei", "cmpulti", "cmpulei", "sbti", "cbti", "andi", "bici",
+ "bisi", "ornoti", "xori", "eqvi", "inslbi", "inslhi", "inslwi",
+ "inslli", "inshbi", "inshhi", "inshwi", "inshli", "sllli", "srlli",
+ "srali", "rolli", "sllwi", "srlwi", "srawi", "rolwi", "extlbi",
+ "extlhi", "extlwi", "extlli", "exthbi", "exthhi", "exthwi", "exthli",
+ "ctpopi", "ctlzi", "cttzi", "revbhi", "revbwi", "revbli", "caswi",
+ "casli", "masklbi", "masklhi", "masklwi", "masklli", "maskhbi", "maskhhi",
+ "maskhwi", "maskhli", "zapi", "zapnoti", "sextbi", "sexthi", "cmpgebi",
+ "seleqi", "selgei", "selgti", "sellei", "sellti", "selnei", "sellbci",
+ "sellbsi", "vlogzz", "fadds", "faddd", "fsubs", "fsubd", "fmuls",
+ "fmuld", "fdivs", "fdivd", "fsqrts", "fsqrtd", "fcmpeq", "fcmple",
+ "fcmplt", "fcmpun", "fcvtsd", "fcvtds", "fcvtdl_g", "fcvtdl_p", "fcvtdl_z",
+ "fcvtdl_n", "fcvtdl", "fcvtwl", "fcvtlw", "fcvtls", "fcvtld", "fcpys",
+ "fcpyse", "fcpysn", "ifmovs", "ifmovd", "rfpcr", "wfpcr", "setfpec0",
+ "setfpec1", "setfpec2", "setfpec3", "frecs", "frecd", "fris", "fris_g",
+ "fris_p", "fris_z", "fris_n", "frid", "frid_g", "frid_p", "frid_z",
+ "frid_n", "fmas", "fmad", "fmss", "fmsd", "fnmas", "fnmad",
+ "fnmss", "fnmsd", "fseleq", "fselne", "fsellt", "fselle", "fselgt",
+ "fselge", "vaddw", "vaddwi", "vsubw", "vsubwi", "vcmpgew", "vcmpgewi",
+ "vcmpeqw", "vcmpeqwi", "vcmplew", "vcmplewi", "vcmpltw", "vcmpltwi", "vcmpulew",
+ "vcmpulewi", "vcmpultw", "vcmpultwi", "vsllw", "vsllwi", "vsrlw", "vsrlwi",
+ "vsraw", "vsrawi", "vrolw", "vrolwi", "sllow", "sllowi", "srlow",
+ "srlowi", "vaddl", "vaddli", "vsubl", "vsubli", "vsllb", "vsllbi",
+ "vsrlb", "vsrlbi", "vsrab", "vsrabi", "vrolb", "vrolbi", "vsllh",
+ "vsllhi", "vsrlh", "vsrlhi", "vsrah", "vsrahi", "vrolh", "vrolhi",
+ "ctpopow", "ctlzow", "vslll", "vsllli", "vsrll", "vsrlli", "vsral",
+ "vsrali", "vroll", "vrolli", "vmaxb", "vminb", "vucaddw", "vucaddwi",
+ "vucsubw", "vucsubwi", "vucaddh", "vucaddhi", "vucsubh", "vucsubhi", "vucaddb",
+ "vucaddbi", "vucsubb", "vucsubbi", "sraow", "sraowi", "vsumw", "vsuml",
+ "vsm4r", "vbinvw", "vcmpueqb", "vcmpugtb", "vcmpugtbi", "vsm3msw", "vmaxh",
+ "vminh", "vmaxw", "vminw", "vmaxl", "vminl", "vumaxb", "vuminb",
+ "vumaxh", "vuminh", "vumaxw", "vuminw", "vumaxl", "vuminl", "vsm4key",
+ "vadds", "vaddd", "vsubs", "vsubd", "vmuls", "vmuld", "vdivs",
+ "vdivd", "vsqrts", "vsqrtd", "vfcmpeq", "vfcmple", "vfcmplt", "vfcmpun",
+ "vcpys", "vcpyse", "vcpysn", "vsums", "vsumd", "vfcvtsd", "vfcvtds",
+ "vfcvtls", "vfcvtld", "vfcvtdl", "vfcvtdl_g", "vfcvtdl_p", "vfcvtdl_z", "vfcvtdl_n",
+ "vfris", "vfris_g", "vfris_p", "vfris_z", "vfris_n", "vfrid", "vfrid_g",
+ "vfrid_p", "vfrid_z", "vfrid_n", "vfrecs", "vfrecd", "vmaxs", "vmins",
+ "vmaxd", "vmind", "vmas", "vmad", "vmss", "vmsd", "vnmas",
+ "vnmad", "vnmss", "vnmsd", "vfseleq", "vfsellt", "vfselle", "vseleqw",
+ "vseleqwi", "vsellbcw", "vsellbcwi", "vselltw", "vselltwi", "vsellew", "vsellewi",
+ "vinsw", "vinsf", "vextw", "vextf", "vcpyw", "vcpyf", "vconw",
+ "vshfw", "vcons", "vcond", "vinsb", "vinsh", "vinsectlh", "vinsectlw",
+ "vinsectll", "vinsectlb", "vshfq", "vshfqb", "vcpyb", "vcpyh", "vsm3r",
+ "vfcvtsh", "vfcvths", "vldw_u", "vstw_u", "vlds_u", "vsts_u", "vldd_u",
+ "vstd_u", "vstw_ul", "vstw_uh", "vsts_ul", "vsts_uh", "vstd_ul", "vstd_uh",
+ "vldd_nc", "vstd_nc", "lbr", "ldbu_a", "ldhu_a", "ldw_a", "ldl_a",
+ "flds_a", "fldd_a", "stbu_a", "sthu_a", "stw_a", "stl_a", "fsts_a",
+ "fstd_a", "dpfhr", "dpfhw", "ldbu", "ldhu", "ldw", "ldl",
+ "ldl_u", "pri_ldl", "pri_ldw", "flds", "fldd", "stb", "sth",
+ "stw", "stl", "stl_u", "pri_stl", "pri_stw", "fsts", "fstd",
+ "beq", "bne", "blt", "ble", "bgt", "bge", "blbc",
+ "blbs", "fbeq", "fbne", "fblt", "fble", "fbgt", "fbge",
+ "ldih", "ldi", };
+
+void insn_profile(DisasContext *ctx, uint32_t insn)
+{
+ int32_t disp16, disp26 __attribute__((unused));
+ uint8_t opc;
+ uint16_t fn3, fn4, fn6, fn8, fn11;
+ TCGv count;
+ int index, offs;
+
+ opc = extract32(insn, 26, 6);
+
+ fn3 = extract32(insn, 10, 3);
+ fn6 = extract32(insn, 10, 6);
+ fn4 = extract32(insn, 12, 4);
+ fn8 = extract32(insn, 5, 8);
+ fn11 = extract32(insn, 5, 11);
+
+ disp16 = sextract32(insn, 0, 16);
+ disp26 = sextract32(insn, 0, 26);
+
+ index = 0;
+ switch (opc) {
+ case 0x00:
+ /* SYS_CALL */
+ index = SYS_CALL;
+ break;
+ case 0x01:
+ /* CALL */
+ index = CALL;
+ break;
+ case 0x02:
+ /* RET */
+ index = RET;
+ break;
+ case 0x03:
+ /* JMP */
+ index = JMP;
+ break;
+ case 0x04:
+ /* BR */
+ index = BR;
+ break;
+ case 0x05:
+ /* BSR */
+ index = BSR;
+ break;
+ case 0x06:
+ switch (disp16) {
+ case 0x0000:
+ /* MEMB */
+ index = MEMB;
+ break;
+ case 0x0001:
+ /* IMEMB */
+ index = IMEMB;
+ break;
+ case 0x0002:
+ /* WMEMB */
+ index = WMEMB;
+ break;
+ case 0x0020:
+ /* RTC */
+ index = RTC;
+ break;
+ case 0x0040:
+ /* RCID */
+ index = RCID;
+ break;
+ case 0x0080:
+ /* HALT */
+ index = HALT;
+ break;
+ case 0x1000:
+ /* RD_F */
+ index = RD_F;
+ break;
+ case 0x1020:
+ /* WR_F */
+ index = WR_F;
+ break;
+ case 0x1040:
+ /* RTID */
+ index = RTID;
+ break;
+ default:
+ if ((disp16 & 0xFF00) == 0xFC00) {
+ /* CSRWS */
+ index = CSRWS;
+ break;
+ }
+ if ((disp16 & 0xFF00) == 0xFD00) {
+ /* CSRWC */
+ index = CSRWC;
+ break;
+ }
+ if ((disp16 & 0xFF00) == 0xFE00) {
+ /* PRI_RCSR */
+ index = PRI_RCSR;
+ break;
+ }
+ if ((disp16 & 0xFF00) == 0xFF00) {
+ /* PRI_WCSR */
+ index = PRI_WCSR;
+ break;
+ }
+ goto do_invalid;
+ }
+ break;
+ case 0x07:
+ /* PRI_RET */
+ index = PRI_RET;
+ break;
+ case 0x08:
+ switch (fn4) {
+ case 0x0:
+ /* LLDW */
+ index = LLDW;
+ break;
+ case 0x1:
+ /* LLDL */
+ index = LLDL;
+ break;
+ case 0x2:
+ /* LDW_INC */
+ index = LDW_INC;
+ break;
+ case 0x3:
+ /* LDL_INC */
+ index = LDL_INC;
+ break;
+ case 0x4:
+ /* LDW_DEC */
+ index = LDW_DEC;
+ break;
+ case 0x5:
+ /* LDL_DEC */
+ index = LDL_DEC;
+ break;
+ case 0x6:
+ /* LDW_SET */
+ index = LDW_SET;
+ break;
+ case 0x7:
+ /* LDL_SET */
+ index = LDL_SET;
+ break;
+ case 0x8:
+ /* LSTW */
+ index = LSTW;
+ break;
+ case 0x9:
+ /* LSTL */
+ index = LSTL;
+ break;
+ case 0xa:
+ /* LDW_NC */
+ index = LDW_NC;
+ break;
+ case 0xb:
+ /* LDL_NC */
+ index = LDL_NC;
+ break;
+ case 0xc:
+ /* LDD_NC */
+ index = LDD_NC;
+ break;
+ case 0xd:
+ /* STW_NC */
+ index = STW_NC;
+ break;
+ case 0xe:
+ /* STL_NC */
+ index = STL_NC;
+ break;
+ case 0xf:
+ /* STD_NC */
+ index = STD_NC;
+ break;
+ default:
+ goto do_invalid;
+ }
+ break;
+ case 0x9:
+ /* LDWE */
+ index = LDWE;
+ break;
+ case 0x0a:
+ /* LDSE */
+ index = LDSE;
+ break;
+ case 0x0b:
+ /* LDDE */
+ index = LDDE;
+ break;
+ case 0x0c:
+ /* VLDS */
+ index = VLDS;
+ break;
+ case 0x0d:
+ /* VLDD */
+ index = VLDD;
+ break;
+ case 0x0e:
+ /* VSTS */
+ index = VSTS;
+ break;
+ case 0x0f:
+ /* VSTD */
+ index = VSTD;
+ break;
+ case 0x10:
+ if (fn11 == 0x70) {
+ /* FIMOVS */
+ index = FIMOVS;
+ } else if (fn11 == 0x78) {
+ /* FIMOVD */
+ index = FIMOVD;
+ } else {
+ switch (fn11 & 0xff) {
+ case 0x00:
+ /* ADDW */
+ index = ADDW;
+ break;
+ case 0x01:
+ /* SUBW */
+ index = SUBW;
+ break;
+ case 0x02:
+ /* S4ADDW */
+ index = S4ADDW;
+ break;
+ case 0x03:
+ /* S4SUBW */
+ index = S4SUBW;
+ break;
+ case 0x04:
+ /* S8ADDW */
+ index = S8ADDW;
+ break;
+ case 0x05:
+ /* S8SUBW */
+ index = S8SUBW;
+ break;
+
+ case 0x08:
+ /* ADDL */
+ index = ADDL;
+ break;
+ case 0x09:
+ /* SUBL */
+ index = SUBL;
+ break;
+ case 0x0a:
+ /* S4ADDL */
+ index = S4ADDL;
+ break;
+ case 0x0b:
+ /* S4SUBL */
+ index = S4SUBL;
+ break;
+ case 0x0c:
+ /* S8ADDL */
+ index = S8ADDL;
+ break;
+ case 0x0d:
+ /* S8SUBL */
+ index = S8SUBL;
+ break;
+ case 0x10:
+ /* MULW */
+ index = MULW;
+ break;
+ case 0x11:
+ /* DIVW */
+ index = DIVW;
+ break;
+ case 0x12:
+ /* UDIVW */
+ index = UDIVW;
+ break;
+ case 0x13:
+ /* REMW */
+ index = REMW;
+ break;
+ case 0x14:
+ /* UREMW */
+ index = UREMW;
+ break;
+ case 0x18:
+ /* MULL */
+ index = MULL;
+ break;
+ case 0x19:
+ /* MULH */
+ index = MULH;
+ break;
+ case 0x1A:
+ /* DIVL */
+ index = DIVL;
+ break;
+ case 0x1B:
+ /* UDIVL */
+ index = UDIVL;
+ break;
+ case 0x1C:
+ /* REML */
+ index = REML;
+ break;
+ case 0x1D:
+ /* UREML */
+ index = UREML;
+ break;
+ case 0x1E:
+ /* ADDPI */
+ index = ADDPI;
+ break;
+ case 0x1F:
+ /* ADDPIS */
+ index = ADDPIS;
+ break;
+ case 0x28:
+ /* CMPEQ */
+ index = CMPEQ;
+ break;
+ case 0x29:
+ /* CMPLT */
+ index = CMPLT;
+ break;
+ case 0x2a:
+ /* CMPLE */
+ index = CMPLE;
+ break;
+ case 0x2b:
+ /* CMPULT */
+ index = CMPULT;
+ break;
+ case 0x2c:
+ /* CMPULE */
+ index = CMPULE;
+ break;
+ case 0x2D:
+ /* SBT */
+ index = SBT;
+ break;
+ case 0x2E:
+ /* CBT */
+ index = CBT;
+ break;
+ case 0x38:
+ /* AND */
+ index = AND;
+ break;
+ case 0x39:
+ /* BIC */
+ index = BIC;
+ break;
+ case 0x3a:
+ /* BIS */
+ index = BIS;
+ break;
+ case 0x3b:
+ /* ORNOT */
+ index = ORNOT;
+ break;
+ case 0x3c:
+ /* XOR */
+ index = XOR;
+ break;
+ case 0x3d:
+ /* EQV */
+ index = EQV;
+ break;
+ case 0x40:
+ /* INSLB */
+ index = INSLB;
+ break;
+ case 0x41:
+ /* INSLH */
+ index = INSLH;
+ break;
+ case 0x42:
+ /* INSLW */
+ index = INSLW;
+ break;
+ case 0x43:
+ /* INSLL */
+ index = INSLL;
+ break;
+ case 0x44:
+ /* INSHB */
+ index = INSHB;
+ break;
+ case 0x45:
+ /* INSHH */
+ index = INSHH;
+ break;
+ case 0x46:
+ /* INSHW */
+ index = INSHW;
+ break;
+ case 0x47:
+ /* INSHL */
+ index = INSHL;
+ break;
+ case 0x48:
+ /* SLLL */
+ index = SLLL;
+ break;
+ case 0x49:
+ /* SRLL */
+ index = SRLL;
+ break;
+ case 0x4a:
+ /* SRAL */
+ index = SRAL;
+ break;
+ case 0x4B:
+ /* ROLL */
+ index = ROLL;
+ break;
+ case 0x4C:
+ /* SLLW */
+ index = SLLW;
+ break;
+ case 0x4D:
+ /* SRLW */
+ index = SRLW;
+ break;
+ case 0x4E:
+ /* SRAW */
+ index = SRAW;
+ break;
+ case 0x4F:
+ /* ROLW */
+ index = ROLW;
+ break;
+ case 0x50:
+ /* EXTLB */
+ index = EXTLB;
+ break;
+ case 0x51:
+ /* EXTLH */
+ index = EXTLH;
+ break;
+ case 0x52:
+ /* EXTLW */
+ index = EXTLW;
+ break;
+ case 0x53:
+ /* EXTLL */
+ index = EXTLL;
+ break;
+ case 0x54:
+ /* EXTHB */
+ index = EXTHB;
+ break;
+ case 0x55:
+ /* EXTHH */
+ index = EXTHH;
+ break;
+ case 0x56:
+ /* EXTHW */
+ index = EXTHW;
+ break;
+ case 0x57:
+ /* EXTHL */
+ index = EXTHL;
+ break;
+ case 0x58:
+ /* CTPOP */
+ index = CTPOP;
+ break;
+ case 0x59:
+ /* CTLZ */
+ index = CTLZ;
+ break;
+ case 0x5a:
+ /* CTTZ */
+ index = CTTZ;
+ break;
+ case 0x5B:
+ /* REVBH */
+ index = REVBH;
+ break;
+ case 0x5C:
+ /* REVBW */
+ index = REVBW;
+ break;
+ case 0x5D:
+ /* REVBL */
+ index = REVBL;
+ break;
+ case 0x5E:
+ /* CASW */
+ index = CASW;
+ break;
+ case 0x5F:
+ /* CASL */
+ index = CASL;
+ break;
+ case 0x60:
+ /* MASKLB */
+ index = MASKLB;
+ break;
+ case 0x61:
+ /* MASKLH */
+ index = MASKLH;
+ break;
+ case 0x62:
+ /* MASKLW */
+ index = MASKLW;
+ break;
+ case 0x63:
+ /* MASKLL */
+ index = MASKLL;
+ break;
+ case 0x64:
+ /* MASKHB */
+ index = MASKHB;
+ break;
+ case 0x65:
+ /* MASKHH */
+ index = MASKHH;
+ break;
+ case 0x66:
+ /* MASKHW */
+ index = MASKHW;
+ break;
+ case 0x67:
+ /* MASKHL */
+ index = MASKHL;
+ break;
+ case 0x68:
+ /* ZAP */
+ index = ZAP;
+ break;
+ case 0x69:
+ /* ZAPNOT */
+ index = ZAPNOT;
+ break;
+ case 0x6a:
+ /* SEXTB */
+ index = SEXTB;
+ break;
+ case 0x6b:
+ /* SEXTH */
+ index = SEXTH;
+ break;
+ case 0x6c:
+ /* CMPGEB*/
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case 0x11:
+ switch (fn3) {
+ case 0x0:
+ /* SELEQ */
+ index = SELEQ;
+ break;
+ case 0x1:
+ /* SELGE */
+ index = SELGE;
+ break;
+ case 0x2:
+ /* SELGT */
+ index = SELGT;
+ break;
+ case 0x3:
+ /* SELLE */
+ index = SELLE;
+ break;
+ case 0x4:
+ /* SELLT */
+ index = SELLT;
+ break;
+ case 0x5:
+ /* SELNE */
+ index = SELNE;
+ break;
+ case 0x6:
+ /* SELLBC */
+ index = SELLBC;
+ break;
+ case 0x7:
+ /* SELLBS */
+ index = SELLBS;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x12:
+ switch (fn8 & 0xff) {
+ case 0x00:
+ /* ADDWI */
+ index = ADDWI;
+ break;
+ case 0x01:
+ /* SUBWI */
+ index = SUBWI;
+ break;
+ case 0x02:
+ /* S4ADDWI */
+ index = S4ADDWI;
+ break;
+ case 0x03:
+ /* S4SUBWI */
+ index = S4SUBWI;
+ break;
+ case 0x04:
+ /* S8ADDWI */
+ index = S8ADDWI;
+ break;
+ case 0x05:
+ /* S8SUBWI */
+ index = S8SUBWI;
+ break;
+
+ case 0x08:
+ /* ADDLI */
+ index = ADDLI;
+ break;
+ case 0x09:
+ /* SUBLI */
+ index = SUBLI;
+ break;
+ case 0x0a:
+ /* S4ADDLI */
+ index = S4ADDLI;
+ break;
+ case 0x0b:
+ /* S4SUBLI */
+ index = S4SUBLI;
+ break;
+ case 0x0c:
+ /* S8ADDLI */
+ index = S8ADDLI;
+ break;
+ case 0x0d:
+ /* S8SUBLI */
+ index = S8SUBLI;
+ break;
+ case 0x10:
+ /* MULWI */
+ index = MULWI;
+ break;
+ case 0x11:
+ /* DIVWI */
+ index = DIVWI;
+ break;
+ case 0x12:
+ /* UDIVWI */
+ index = UDIVWI;
+ break;
+ case 0x13:
+ /* REMWI */
+ index = REMWI;
+ break;
+ case 0x14:
+ /* UREMWI */
+ index = UREMWI;
+ break;
+ case 0x18:
+ /* MULLI */
+ index = MULLI;
+ break;
+ case 0x19:
+ /* MULHI */
+ index = MULHI;
+ break;
+ case 0x1A:
+ /* DIVLI */
+ index = DIVLI;
+ break;
+ case 0x1B:
+ /* UDIVLI */
+ index = UDIVLI;
+ break;
+ case 0x1C:
+ /* REMLI */
+ index = REMLI;
+ break;
+ case 0x1D:
+ /* UREMLI */
+ index = UREMLI;
+ break;
+ case 0x1E:
+ /* ADDPII */
+ index = ADDPII;
+ break;
+ case 0x1F:
+ /* ADDPISI */
+ index = ADDPISI;
+ break;
+ case 0x28:
+ /* CMPEQI */
+ index = CMPEQI;
+ break;
+ case 0x29:
+ /* CMPLTI */
+ index = CMPLTI;
+ break;
+ case 0x2a:
+ /* CMPLEI */
+ index = CMPLEI;
+ break;
+ case 0x2b:
+ /* CMPULTI */
+ index = CMPULTI;
+ break;
+ case 0x2c:
+ /* CMPULEI */
+ index = CMPULEI;
+ break;
+ case 0x2D:
+ /* SBTI */
+ index = SBTI;
+ break;
+ case 0x2E:
+ /* CBTI */
+ index = CBTI;
+ break;
+ case 0x38:
+ /* ANDI */
+ index = ANDI;
+ break;
+ case 0x39:
+ /* BICI */
+ index = BICI;
+ break;
+ case 0x3a:
+ /* BISI */
+ index = BISI;
+ break;
+ case 0x3b:
+ /* ORNOTI */
+ index = ORNOTI;
+ break;
+ case 0x3c:
+ /* XORI */
+ index = XORI;
+ break;
+ case 0x3d:
+ /* EQVI */
+ index = EQVI;
+ break;
+ case 0x40:
+ /* INSLBI */
+ index = INSLBI;
+ break;
+ case 0x41:
+ /* INSLHI */
+ index = INSLHI;
+ break;
+ case 0x42:
+ /* INSLWI */
+ index = INSLWI;
+ break;
+ case 0x43:
+ /* INSLLI */
+ index = INSLLI;
+ break;
+ case 0x44:
+ /* INSHBI */
+ index = INSHBI;
+ break;
+ case 0x45:
+ /* INSHHI */
+ index = INSHHI;
+ break;
+ case 0x46:
+ /* INSHWI */
+ index = INSHWI;
+ break;
+ case 0x47:
+ /* INSHLI */
+ index = INSHLI;
+ break;
+ case 0x48:
+ /* SLLLI */
+ index = SLLLI;
+ break;
+ case 0x49:
+ /* SRLLI */
+ index = SRLLI;
+ break;
+ case 0x4a:
+ /* SRALI */
+ index = SRALI;
+ break;
+ case 0x4B:
+ /* ROLLI */
+ index = ROLLI;
+ break;
+ case 0x4C:
+ /* SLLWI */
+ index = SLLWI;
+ break;
+ case 0x4D:
+ /* SRLWI */
+ index = SRLWI;
+ break;
+ case 0x4E:
+ /* SRAWI */
+ index = SRAWI;
+ break;
+ case 0x4F:
+ /* ROLWI */
+ index = ROLWI;
+ break;
+ case 0x50:
+ /* EXTLBI */
+ index = EXTLBI;
+ break;
+ case 0x51:
+ /* EXTLHI */
+ index = EXTLHI;
+ break;
+ case 0x52:
+ /* EXTLWI */
+ index = EXTLWI;
+ break;
+ case 0x53:
+ /* EXTLLI */
+ index = EXTLLI;
+ break;
+ case 0x54:
+ /* EXTHBI */
+ index = EXTHBI;
+ break;
+ case 0x55:
+ /* EXTHHI */
+ index = EXTHHI;
+ break;
+ case 0x56:
+ /* EXTHWI */
+ index = EXTHWI;
+ break;
+ case 0x57:
+ /* EXTHLI */
+ index = EXTHLI;
+ break;
+ case 0x58:
+ /* CTPOPI */
+ index = CTPOPI;
+ break;
+ case 0x59:
+ /* CTLZI */
+ index = CTLZI;
+ break;
+ case 0x5a:
+ /* CTTZI */
+ index = CTTZI;
+ break;
+ case 0x5B:
+ /* REVBHI */
+ index = REVBHI;
+ break;
+ case 0x5C:
+ /* REVBWI */
+ index = REVBWI;
+ break;
+ case 0x5D:
+ /* REVBLI */
+ index = REVBLI;
+ break;
+ case 0x5E:
+ /* CASWI */
+ index = CASWI;
+ break;
+ case 0x5F:
+ /* CASLI */
+ index = CASLI;
+ break;
+ case 0x60:
+ /* MASKLBI */
+ index = MASKLBI;
+ break;
+ case 0x61:
+ /* MASKLHI */
+ index = MASKLHI;
+ break;
+ case 0x62:
+ /* MASKLWI */
+ index = MASKLWI;
+ break;
+ case 0x63:
+ /* MASKLLI */
+ index = MASKLLI;
+ break;
+ case 0x64:
+ /* MASKHBI */
+ index = MASKHBI;
+ break;
+ case 0x65:
+ /* MASKHHI */
+ index = MASKHHI;
+ break;
+ case 0x66:
+ /* MASKHWI */
+ index = MASKHWI;
+ break;
+ case 0x67:
+ /* MASKHLI */
+ index = MASKHLI;
+ break;
+ case 0x68:
+ /* ZAPI */
+ index = ZAPI;
+ break;
+ case 0x69:
+ /* ZAPNOTI */
+ index = ZAPNOTI;
+ break;
+ case 0x6a:
+ /* SEXTBI */
+ index = SEXTBI;
+ break;
+ case 0x6b:
+ /* SEXTHI */
+ index = SEXTHI;
+ break;
+ case 0x6c:
+ /* CMPGEBI */
+ index = CMPGEBI;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x13:
+ switch (fn3) {
+ case 0x0:
+ /* SELEQI */
+ index = SELEQI;
+ break;
+ case 0x1:
+ /* SELGEI */
+ index = SELGEI;
+ break;
+ case 0x2:
+ /* SELGTI */
+ index = SELGTI;
+ break;
+ case 0x3:
+ /* SELLEI */
+ index = SELLEI;
+ break;
+ case 0x4:
+ /* SELLTI */
+ index = SELLTI;
+ break;
+ case 0x5:
+ /* SELNEI */
+ index = SELNEI;
+ break;
+ case 0x6:
+ /* SELLBCI */
+ index = SELLBCI;
+ break;
+ case 0x7:
+ /* SELLBSI */
+ index = SELLBSI;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ /* VLOGZZ */
+ index = VLOGZZ;
+ break;
+ case 0x18:
+ switch (fn8) {
+ case 0x00:
+ /* FADDS */
+ index = FADDS;
+ break;
+ case 0x01:
+ /* FADDD */
+ index = FADDD;
+ break;
+ case 0x02:
+ /* FSUBS */
+ index = FSUBS;
+ break;
+ case 0x03:
+ /* FSUBD */
+ index = FSUBD;
+ break;
+ case 0x4:
+ /* FMULS */
+ index = FMULS;
+ break;
+ case 0x05:
+ /* FMULD */
+ index = FMULD;
+ break;
+ case 0x06:
+ /* FDIVS */
+ index = FDIVS;
+ break;
+ case 0x07:
+ /* FDIVD */
+ index = FDIVD;
+ break;
+ case 0x08:
+ /* FSQRTS */
+ index = FSQRTS;
+ break;
+ case 0x09:
+ /* FSQRTD */
+ index = FSQRTD;
+ break;
+ case 0x10:
+ /* FCMPEQ */
+ index = FCMPEQ;
+ break;
+ case 0x11:
+ /* FCMPLE */
+ index = FCMPLE;
+ break;
+ case 0x12:
+ /* FCMPLT */
+ index = FCMPLT;
+ break;
+ case 0x13:
+ /* FCMPUN */
+ index = FCMPUN;
+ break;
+ case 0x20:
+ /* FCVTSD */
+ index = FCVTSD;
+ break;
+ case 0x21:
+ /* FCVTDS */
+ index = FCVTDS;
+ break;
+ case 0x22:
+ /* FCVTDL_G */
+ index = FCVTDL_G;
+ break;
+ case 0x23:
+ /* FCVTDL_P */
+ index = FCVTDL_P;
+ break;
+ case 0x24:
+ /* FCVTDL_Z */
+ index = FCVTDL_Z;
+ break;
+ case 0x25:
+ /* FCVTDL_N */
+ index = FCVTDL_N;
+ break;
+ case 0x27:
+ /* FCVTDL */
+ index = FCVTDL;
+ break;
+ case 0x28:
+ /* FCVTWL */
+ index = FCVTWL;
+ break;
+ case 0x29:
+ /* FCVTLW */
+ index = FCVTLW;
+ break;
+ case 0x2d:
+ /* FCVTLS */
+ index = FCVTLS;
+ break;
+ case 0x2f:
+ /* FCVTLD */
+ index = FCVTLD;
+ break;
+ case 0x30:
+ /* FCPYS */
+ index = FCPYS;
+ break;
+ case 0x31:
+ /* FCPYSE */
+ index = FCPYSE;
+ break;
+ case 0x32:
+ /* FCPYSN */
+ index = FCPYSN;
+ break;
+ case 0x40:
+ /* IFMOVS */
+ index = IFMOVS;
+ break;
+ case 0x41:
+ /* IFMOVD */
+ index = IFMOVD;
+ break;
+ case 0x50:
+ /* RFPCR */
+ index = RFPCR;
+ break;
+ case 0x51:
+ /* WFPCR */
+ index = WFPCR;
+ break;
+ case 0x54:
+ /* SETFPEC0 */
+ index = SETFPEC0;
+ break;
+ case 0x55:
+ /* SETFPEC1 */
+ index = SETFPEC1;
+ break;
+ case 0x56:
+ /* SETFPEC2 */
+ index = SETFPEC2;
+ break;
+ case 0x57:
+ /* SETFPEC3 */
+ index = SETFPEC3;
+ break;
+ case 0x58:
+ /* FRECS */
+ index = FRECS;
+ break;
+ case 0x59:
+ /* FRECD */
+ index = FRECD;
+ break;
+ case 0x5A:
+ /* FRIS */
+ index = FRIS;
+ break;
+ case 0x5B:
+ /* FRIS_G */
+ index = FRIS_G;
+ break;
+ case 0x5C:
+ /* FRIS_P */
+ index = FRIS_P;
+ break;
+ case 0x5D:
+ /* FRIS_Z */
+ index = FRIS_Z;
+ break;
+ case 0x5F:
+ /* FRIS_N */
+ index = FRIS_N;
+ break;
+ case 0x60:
+ /* FRID */
+ index = FRID;
+ break;
+ case 0x61:
+ /* FRID_G */
+ index = FRID_G;
+ break;
+ case 0x62:
+ /* FRID_P */
+ index = FRID_P;
+ break;
+ case 0x63:
+ /* FRID_Z */
+ index = FRID_Z;
+ break;
+ case 0x64:
+ /* FRID_N */
+ index = FRID_N;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x19:
+ switch (fn6) {
+ case 0x00:
+ /* FMAS */
+ index = FMAS;
+ break;
+ case 0x01:
+ /* FMAD */
+ index = FMAD;
+ break;
+ case 0x02:
+ /* FMSS */
+ index = FMSS;
+ break;
+ case 0x03:
+ /* FMSD */
+ index = FMSD;
+ break;
+ case 0x04:
+ /* FNMAS */
+ index = FNMAS;
+ break;
+ case 0x05:
+ /* FNMAD */
+ index = FNMAD;
+ break;
+ case 0x06:
+ /* FNMSS */
+ index = FNMSS;
+ break;
+ case 0x07:
+ /* FNMSD */
+ index = FNMSD;
+ break;
+ case 0x10:
+ /* FSELEQ */
+ index = FSELEQ;
+ break;
+ case 0x11:
+ /* FSELNE */
+ index = FSELNE;
+ break;
+ case 0x12:
+ /* FSELLT */
+ index = FSELLT;
+ break;
+ case 0x13:
+ /* FSELLE */
+ index = FSELLE;
+ break;
+ case 0x14:
+ /* FSELGT */
+ index = FSELGT;
+ break;
+ case 0x15:
+ /* FSELGE */
+ index = FSELGE;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x1A:
+ switch (fn8) {
+ case 0x00:
+ /* VADDW */
+ index = VADDW;
+ break;
+ case 0x20:
+ /* VADDWI */
+ index = VADDWI;
+ break;
+ case 0x01:
+ /* VSUBW */
+ index = VSUBW;
+ break;
+ case 0x21:
+ /* VSUBWI */
+ index = VSUBWI;
+ break;
+ case 0x02:
+ /* VCMPGEW */
+ index = VCMPGEW;
+ break;
+ case 0x22:
+ /* VCMPGEWI */
+ index = VCMPGEWI;
+ break;
+ case 0x03:
+ /* VCMPEQW */
+ index = VCMPEQW;
+ break;
+ case 0x23:
+ /* VCMPEQWI */
+ index = VCMPEQWI;
+ break;
+ case 0x04:
+ /* VCMPLEW */
+ index = VCMPLEW;
+ break;
+ case 0x24:
+ /* VCMPLEWI */
+ index = VCMPLEWI;
+ break;
+ case 0x05:
+ /* VCMPLTW */
+ index = VCMPLTW;
+ break;
+ case 0x25:
+ /* VCMPLTWI */
+ index = VCMPLTWI;
+ break;
+ case 0x06:
+ /* VCMPULEW */
+ index = VCMPULEW;
+ break;
+ case 0x26:
+ /* VCMPULEWI */
+ index = VCMPULEWI;
+ break;
+ case 0x07:
+ /* VCMPULTW */
+ index = VCMPULTW;
+ break;
+ case 0x27:
+ /* VCMPULTWI */
+ index = VCMPULTWI;
+ break;
+ case 0x08:
+ /* VSLLW */
+ index = VSLLW;
+ break;
+ case 0x28:
+ /* VSLLWI */
+ index = VSLLWI;
+ break;
+ case 0x09:
+ /* VSRLW */
+ index = VSRLW;
+ break;
+ case 0x29:
+ /* VSRLWI */
+ index = VSRLWI;
+ break;
+ case 0x0A:
+ /* VSRAW */
+ index = VSRAW;
+ break;
+ case 0x2A:
+ /* VSRAWI */
+ index = VSRAWI;
+ break;
+ case 0x0B:
+ /* VROLW */
+ index = VROLW;
+ break;
+ case 0x2B:
+ /* VROLWI */
+ index = VROLWI;
+ break;
+ case 0x0C:
+ /* SLLOW */
+ index = SLLOW;
+ break;
+ case 0x2C:
+ /* SLLOWI */
+ index = SLLOWI;
+ break;
+ case 0x0D:
+ /* SRLOW */
+ index = SRLOW;
+ break;
+ case 0x2D:
+ /* SRLOWI */
+ index = SRLOWI;
+ break;
+ case 0x0E:
+ /* VADDL */
+ index = VADDL;
+ break;
+ case 0x2E:
+ /* VADDLI */
+ index = VADDLI;
+ break;
+ case 0x0F:
+ /* VSUBL */
+ index = VSUBL;
+ break;
+ case 0x2F:
+ /* VSUBLI */
+ index = VSUBLI;
+ break;
+ case 0x10:
+ /* VSLLB */
+ index = VSLLB;
+ break;
+ case 0x30:
+ /* VSLLBI */
+ index = VSLLBI;
+ break;
+ case 0x11:
+ /* VSRLB */
+ index = VSRLB;
+ break;
+ case 0x31:
+ /* VSRLBI */
+ index = VSRLBI;
+ break;
+ case 0x12:
+ /* VSRAB */
+ index = VSRAB;
+ break;
+ case 0x32:
+ /* VSRABI */
+ index = VSRABI;
+ break;
+ case 0x13:
+ /* VROLB */
+ index = VROLB;
+ break;
+ case 0x33:
+ /* VROLBI */
+ index = VROLBI;
+ break;
+ case 0x14:
+ /* VSLLH */
+ index = VSLLH;
+ break;
+ case 0x34:
+ /* VSLLHI */
+ index = VSLLHI;
+ break;
+ case 0x15:
+ /* VSRLH */
+ index = VSRLH;
+ break;
+ case 0x35:
+ /* VSRLHI */
+ index = VSRLHI;
+ break;
+ case 0x16:
+ /* VSRAH */
+ index = VSRAH;
+ break;
+ case 0x36:
+ /* VSRAHI */
+ index = VSRAHI;
+ break;
+ case 0x17:
+ /* VROLH */
+ index = VROLH;
+ break;
+ case 0x37:
+ /* VROLHI */
+ index = VROLHI;
+ break;
+ case 0x18:
+ /* CTPOPOW */
+ index = CTPOPOW;
+ break;
+ case 0x19:
+ /* CTLZOW */
+ index = CTLZOW;
+ break;
+ case 0x1A:
+ /* VSLLL */
+ index = VSLLL;
+ break;
+ case 0x3A:
+ /* VSLLLI */
+ index = VSLLLI;
+ break;
+ case 0x1B:
+ /* VSRLL */
+ index = VSRLL;
+ break;
+ case 0x3B:
+ /* VSRLLI */
+ index = VSRLLI;
+ break;
+ case 0x1C:
+ /* VSRAL */
+ index = VSRAL;
+ break;
+ case 0x3C:
+ /* VSRALI */
+ index = VSRALI;
+ break;
+ case 0x1D:
+ /* VROLL */
+ index = VROLL;
+ break;
+ case 0x3D:
+ /* VROLLI */
+ index = VROLLI;
+ break;
+ case 0x1E:
+ /* VMAXB */
+ index = VMAXB;
+ break;
+ case 0x1F:
+ /* VMINB */
+ index = VMINB;
+ break;
+ case 0x40:
+ /* VUCADDW */
+ index = VUCADDW;
+ break;
+ case 0x60:
+ /* VUCADDWI */
+ index = VUCADDWI;
+ break;
+ case 0x41:
+ /* VUCSUBW */
+ index = VUCSUBW;
+ break;
+ case 0x61:
+ /* VUCSUBWI */
+ index = VUCSUBWI;
+ break;
+ case 0x42:
+ /* VUCADDH */
+ index = VUCADDH;
+ break;
+ case 0x62:
+ /* VUCADDHI */
+ index = VUCADDHI;
+ break;
+ case 0x43:
+ /* VUCSUBH */
+ index = VUCSUBH;
+ break;
+ case 0x63:
+ /* VUCSUBHI */
+ index = VUCSUBHI;
+ break;
+ case 0x44:
+ /* VUCADDB */
+ index = VUCADDB;
+ break;
+ case 0x64:
+ /* VUCADDBI */
+ index = VUCADDBI;
+ break;
+ case 0x45:
+ /* VUCSUBB */
+ index = VUCSUBB;
+ break;
+ case 0x65:
+ /* VUCSUBBI */
+ index = VUCSUBBI;
+ break;
+ case 0x46:
+ /* SRAOW */
+ index = SRAOW;
+ break;
+ case 0x66:
+ /* SRAOWI */
+ index = SRAOWI;
+ break;
+ case 0x47:
+ /* VSUMW */
+ index = VSUMW;
+ break;
+ case 0x48:
+ /* VSUML */
+ index = VSUML;
+ break;
+ case 0x49:
+ /* VSM4R */
+ index = VSM4R;
+ break;
+ case 0x4A:
+ /* VBINVW */
+ index = VBINVW;
+ break;
+ case 0x4B:
+ /* VCMPUEQB */
+ index = VCMPUEQB;
+ break;
+ case 0x6B:
+ /* VCMPUEQBI*/
+ break;
+ case 0x4C:
+ /* VCMPUGTB */
+ index = VCMPUGTB;
+ break;
+ case 0x6C:
+ /* VCMPUGTBI */
+ index = VCMPUGTBI;
+ break;
+ case 0x4D:
+ /* VSM3MSW */
+ index = VSM3MSW;
+ break;
+ case 0x50:
+ /* VMAXH */
+ index = VMAXH;
+ break;
+ case 0x51:
+ /* VMINH */
+ index = VMINH;
+ break;
+ case 0x52:
+ /* VMAXW */
+ index = VMAXW;
+ break;
+ case 0x53:
+ /* VMINW */
+ index = VMINW;
+ break;
+ case 0x54:
+ /* VMAXL */
+ index = VMAXL;
+ break;
+ case 0x55:
+ /* VMINL */
+ index = VMINL;
+ break;
+ case 0x56:
+ /* VUMAXB */
+ index = VUMAXB;
+ break;
+ case 0x57:
+ /* VUMINB */
+ index = VUMINB;
+ break;
+ case 0x58:
+ /* VUMAXH */
+ index = VUMAXH;
+ break;
+ case 0x59:
+ /* VUMINH */
+ index = VUMINH;
+ break;
+ case 0x5A:
+ /* VUMAXW */
+ index = VUMAXW;
+ break;
+ case 0x5B:
+ /* VUMINW */
+ index = VUMINW;
+ break;
+ case 0x5C:
+ /* VUMAXL */
+ index = VUMAXL;
+ break;
+ case 0x5D:
+ /* VUMINL */
+ index = VUMINL;
+ break;
+ case 0x68:
+ /* VSM4KEY */
+ index = VSM4KEY;
+ break;
+ case 0x80:
+ /* VADDS */
+ index = VADDS;
+ break;
+ case 0x81:
+ /* VADDD */
+ index = VADDD;
+ break;
+ case 0x82:
+ /* VSUBS */
+ index = VSUBS;
+ break;
+ case 0x83:
+ /* VSUBD */
+ index = VSUBD;
+ break;
+ case 0x84:
+ /* VMULS */
+ index = VMULS;
+ break;
+ case 0x85:
+ /* VMULD */
+ index = VMULD;
+ break;
+ case 0x86:
+ /* VDIVS */
+ index = VDIVS;
+ break;
+ case 0x87:
+ /* VDIVD */
+ index = VDIVD;
+ break;
+ case 0x88:
+ /* VSQRTS */
+ index = VSQRTS;
+ break;
+ case 0x89:
+ /* VSQRTD */
+ index = VSQRTD;
+ break;
+ case 0x8C:
+ /* VFCMPEQ */
+ index = VFCMPEQ;
+ break;
+ case 0x8D:
+ /* VFCMPLE */
+ index = VFCMPLE;
+ break;
+ case 0x8E:
+ /* VFCMPLT */
+ index = VFCMPLT;
+ break;
+ case 0x8F:
+ /* VFCMPUN */
+ index = VFCMPUN;
+ break;
+ case 0x90:
+ /* VCPYS */
+ index = VCPYS;
+ break;
+ case 0x91:
+ /* VCPYSE */
+ index = VCPYSE;
+ break;
+ case 0x92:
+ /* VCPYSN */
+ index = VCPYSN;
+ break;
+ case 0x93:
+ /* VSUMS */
+ index = VSUMS;
+ break;
+ case 0x94:
+ /* VSUMD */
+ index = VSUMD;
+ break;
+ case 0x95:
+ /* VFCVTSD */
+ index = VFCVTSD;
+ break;
+ case 0x96:
+ /* VFCVTDS */
+ index = VFCVTDS;
+ break;
+ case 0x99:
+ /* VFCVTLS */
+ index = VFCVTLS;
+ break;
+ case 0x9A:
+ /* VFCVTLD */
+ index = VFCVTLD;
+ break;
+ case 0x9B:
+ /* VFCVTDL */
+ index = VFCVTDL;
+ break;
+ case 0x9C:
+ /* VFCVTDL_G */
+ index = VFCVTDL_G;
+ break;
+ case 0x9D:
+ /* VFCVTDL_P */
+ index = VFCVTDL_P;
+ break;
+ case 0x9E:
+ /* VFCVTDL_Z */
+ index = VFCVTDL_Z;
+ break;
+ case 0x9F:
+ /* VFCVTDL_N */
+ index = VFCVTDL_N;
+ break;
+ case 0xA0:
+ /* VFRIS */
+ index = VFRIS;
+ break;
+ case 0xA1:
+ /* VFRIS_G */
+ index = VFRIS_G;
+ break;
+ case 0xA2:
+ /* VFRIS_P */
+ index = VFRIS_P;
+ break;
+ case 0xA3:
+ /* VFRIS_Z */
+ index = VFRIS_Z;
+ break;
+ case 0xA4:
+ /* VFRIS_N */
+ index = VFRIS_N;
+ break;
+ case 0xA5:
+ /* VFRID */
+ index = VFRID;
+ break;
+ case 0xA6:
+ /* VFRID_G */
+ index = VFRID_G;
+ break;
+ case 0xA7:
+ /* VFRID_P */
+ index = VFRID_P;
+ break;
+ case 0xA8:
+ /* VFRID_Z */
+ index = VFRID_Z;
+ break;
+ case 0xA9:
+ /* VFRID_N */
+ index = VFRID_N;
+ break;
+ case 0xAA:
+ /* VFRECS */
+ index = VFRECS;
+ break;
+ case 0xAB:
+ /* VFRECD */
+ index = VFRECD;
+ break;
+ case 0xAC:
+ /* VMAXS */
+ index = VMAXS;
+ break;
+ case 0xAD:
+ /* VMINS */
+ index = VMINS;
+ break;
+ case 0xAE:
+ /* VMAXD */
+ index = VMAXD;
+ break;
+ case 0xAF:
+ /* VMIND */
+ index = VMIND;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x1B:
+ switch (fn6) {
+ case 0x00:
+ /* VMAS */
+ index = VMAS;
+ break;
+ case 0x01:
+ /* VMAD */
+ index = VMAD;
+ break;
+ case 0x02:
+ /* VMSS */
+ index = VMSS;
+ break;
+ case 0x03:
+ /* VMSD */
+ index = VMSD;
+ break;
+ case 0x04:
+ /* VNMAS */
+ index = VNMAS;
+ break;
+ case 0x05:
+ /* VNMAD */
+ index = VNMAD;
+ break;
+ case 0x06:
+ /* VNMSS */
+ index = VNMSS;
+ break;
+ case 0x07:
+ /* VNMSD */
+ index = VNMSD;
+ break;
+ case 0x10:
+ /* VFSELEQ */
+ index = VFSELEQ;
+ break;
+ case 0x12:
+ /* VFSELLT */
+ index = VFSELLT;
+ break;
+ case 0x13:
+ /* VFSELLE */
+ index = VFSELLE;
+ break;
+ case 0x18:
+ /* VSELEQW */
+ index = VSELEQW;
+ break;
+ case 0x38:
+ /* VSELEQWI */
+ index = VSELEQWI;
+ break;
+ case 0x19:
+ /* VSELLBCW */
+ index = VSELLBCW;
+ break;
+ case 0x39:
+ /* VSELLBCWI */
+ index = VSELLBCWI;
+ break;
+ case 0x1A:
+ /* VSELLTW */
+ index = VSELLTW;
+ break;
+ case 0x3A:
+ /* VSELLTWI */
+ index = VSELLTWI;
+ break;
+ case 0x1B:
+ /* VSELLEW */
+ index = VSELLEW;
+ break;
+ case 0x3B:
+ /* VSELLEWI */
+ index = VSELLEWI;
+ break;
+ case 0x20:
+ /* VINSW */
+ index = VINSW;
+ break;
+ case 0x21:
+ /* VINSF */
+ index = VINSF;
+ break;
+ case 0x22:
+ /* VEXTW */
+ index = VEXTW;
+ break;
+ case 0x23:
+ /* VEXTF */
+ index = VEXTF;
+ break;
+ case 0x24:
+ /* VCPYW */
+ index = VCPYW;
+ break;
+ case 0x25:
+ /* VCPYF */
+ index = VCPYF;
+ break;
+ case 0x26:
+ /* VCONW */
+ index = VCONW;
+ break;
+ case 0x27:
+ /* VSHFW */
+ index = VSHFW;
+ break;
+ case 0x28:
+ /* VCONS */
+ index = VCONS;
+ break;
+ case 0x29:
+ /* VCOND */
+ index = VCOND;
+ break;
+ case 0x2A:
+ /* VINSB */
+ index = VINSB;
+ break;
+ case 0x2B:
+ /* VINSH */
+ index = VINSH;
+ break;
+ case 0x2C:
+ /* VINSECTLH */
+ index = VINSECTLH;
+ break;
+ case 0x2D:
+ /* VINSECTLW */
+ index = VINSECTLW;
+ break;
+ case 0x2E:
+ /* VINSECTLL */
+ index = VINSECTLL;
+ break;
+ case 0x2F:
+ /* VINSECTLB */
+ index = VINSECTLB;
+ break;
+ case 0x30:
+ /* VSHFQ */
+ index = VSHFQ;
+ break;
+ case 0x31:
+ /* VSHFQB */
+ index = VSHFQB;
+ break;
+ case 0x32:
+ /* VCPYB */
+ index = VCPYB;
+ break;
+ case 0x33:
+ /* VCPYH */
+ index = VCPYH;
+ break;
+ case 0x34:
+ /* VSM3R */
+ index = VSM3R;
+ break;
+ case 0x35:
+ /* VFCVTSH */
+ index = VFCVTSH;
+ break;
+ case 0x36:
+ /* VFCVTHS */
+ index = VFCVTHS;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x1C:
+ switch (fn4) {
+ case 0x0:
+ /* VLDW_U */
+ index = VLDW_U;
+ break;
+ case 0x1:
+ /* VSTW_U */
+ index = VSTW_U;
+ break;
+ case 0x2:
+ /* VLDS_U */
+ index = VLDS_U;
+ break;
+ case 0x3:
+ /* VSTS_U */
+ index = VSTS_U;
+ break;
+ case 0x4:
+ /* VLDD_U */
+ index = VLDD_U;
+ break;
+ case 0x5:
+ /* VSTD_U */
+ index = VSTD_U;
+ break;
+ case 0x8:
+ /* VSTW_UL */
+ index = VSTW_UL;
+ break;
+ case 0x9:
+ /* VSTW_UH */
+ index = VSTW_UH;
+ break;
+ case 0xa:
+ /* VSTS_UL */
+ index = VSTS_UL;
+ break;
+ case 0xb:
+ /* VSTS_UH */
+ index = VSTS_UH;
+ break;
+ case 0xc:
+ /* VSTD_UL */
+ index = VSTD_UL;
+ break;
+ case 0xd:
+ /* VSTD_UH */
+ index = VSTD_UH;
+ break;
+ case 0xe:
+ /* VLDD_NC */
+ index = VLDD_NC;
+ break;
+ case 0xf:
+ /* VSTD_NC */
+ index = VSTD_NC;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x1D:
+ /* LBR */
+ index = LBR;
+ break;
+ case 0x1E:
+ switch (fn4) {
+ case 0x0:
+ /* LDBU_A */
+ index = LDBU_A;
+ break;
+ case 0x1:
+ /* LDHU_A */
+ index = LDHU_A;
+ break;
+ case 0x2:
+ /* LDW_A */
+ index = LDW_A;
+ break;
+ case 0x3:
+ /* LDL_A */
+ index = LDL_A;
+ break;
+ case 0x4:
+ /* FLDS_A */
+ index = FLDS_A;
+ break;
+ case 0x5:
+ /* FLDD_A */
+ index = FLDD_A;
+ break;
+ case 0x6:
+ /* STBU_A */
+ index = STBU_A;
+ break;
+ case 0x7:
+ /* STHU_A */
+ index = STHU_A;
+ break;
+ case 0x8:
+ /* STW_A */
+ index = STW_A;
+ break;
+ case 0x9:
+ /* STL_A */
+ index = STL_A;
+ break;
+ case 0xA:
+ /* FSTS_A */
+ index = FSTS_A;
+ break;
+ case 0xB:
+ /* FSTD_A */
+ index = FSTD_A;
+ break;
+ case 0xE:
+ /* DPFHR */
+ index = DPFHR;
+ break;
+ case 0xF:
+ /* DPFHW */
+ index = DPFHW;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x20:
+ /* LDBU */
+ index = LDBU;
+ break;
+ case 0x21:
+ /* LDHU */
+ index = LDHU;
+ break;
+ case 0x22:
+ /* LDW */
+ index = LDW;
+ break;
+ case 0x23:
+ /* LDL */
+ index = LDL;
+ break;
+ case 0x24:
+ /* LDL_U */
+ index = LDL_U;
+ break;
+ case 0x25:
+ if ((insn >> 12) & 1) {
+ /* PRI_LDL */
+ index = PRI_LDL;
+ } else {
+ /* PRI_LDW */
+ index = PRI_LDW;
+ }
+ break;
+ case 0x26:
+ /* FLDS */
+ index = FLDS;
+ break;
+ case 0x27:
+ /* FLDD */
+ index = FLDD;
+ break;
+ case 0x28:
+ /* STB */
+ index = STB;
+ break;
+ case 0x29:
+ /* STH */
+ index = STH;
+ break;
+ case 0x2a:
+ /* STW */
+ index = STW;
+ break;
+ case 0x2b:
+ /* STL */
+ index = STL;
+ break;
+ case 0x2c:
+ /* STL_U */
+ index = STL_U;
+ break;
+ case 0x2d:
+ if ((insn >> 12) & 1) {
+ /* PRI_STL */
+ index = PRI_STL;
+ } else {
+ /* PRI_STW */
+ index = PRI_STW;
+ }
+ break;
+ case 0x2e:
+ /* FSTS */
+ index = FSTS;
+ break;
+ case 0x2f:
+ /* FSTD */
+ index = FSTD;
+ break;
+ case 0x30:
+ /* BEQ */
+ index = BEQ;
+ break;
+ case 0x31:
+ /* BNE */
+ index = BNE;
+ break;
+ case 0x32:
+ /* BLT */
+ index = BLT;
+ break;
+ case 0x33:
+ /* BLE */
+ index = BLE;
+ break;
+ case 0x34:
+ /* BGT */
+ index = BGT;
+ break;
+ case 0x35:
+ /* BGE */
+ index = BGE;
+ break;
+ case 0x36:
+ /* BLBC */
+ index = BLBC;
+ break;
+ case 0x37:
+ /* BLBS */
+ index = BLBS;
+ break;
+ case 0x38:
+ /* FBEQ */
+ index = FBEQ;
+ break;
+ case 0x39:
+ /* FBNE */
+ index = FBNE;
+ break;
+ case 0x3a:
+ /* FBLT */
+ index = FBLT;
+ break;
+ case 0x3b:
+ /* FBLE */
+ index = FBLE;
+ break;
+ case 0x3c:
+ /* FBGT */
+ index = FBGT;
+ break;
+ case 0x3d:
+ /* FBGE */
+ index = FBGE;
+ break;
+ case 0x3f:
+ /* LDIH */
+ index = LDIH;
+ break;
+ case 0x3e:
+ /* LDI */
+ index = LDI;
+ break;
+ default:
+do_invalid:
+ break;
+ }
+ count = tcg_temp_new();
+ offs = offsetof(CPUSW64State, insn_count[index]);
+ tcg_gen_ld_i64(count, cpu_env, offs);
+ tcg_gen_addi_i64(count, count, 1);
+ tcg_gen_st_i64(count, cpu_env, offs);
+ tcg_temp_free(count);
+}
diff --git a/target/sw64/profile.h b/target/sw64/profile.h
new file mode 100644
index 0000000000..5aca541ea7
--- /dev/null
+++ b/target/sw64/profile.h
@@ -0,0 +1,541 @@
+#ifndef PROFILE_H
+#define PROFILE_H
+#define SYS_CALL 0
+#define CALL 1
+#define RET 2
+#define JMP 3
+#define BR 4
+#define BSR 5
+#define MEMB 6
+#define IMEMB 7
+#define WMEMB 8
+#define RTC 9
+#define RCID 10
+#define HALT 11
+#define RD_F 12
+#define WR_F 13
+#define RTID 14
+#define CSRWS 15
+#define CSRWC 16
+#define PRI_RCSR 17
+#define PRI_WCSR 18
+#define PRI_RET 19
+#define LLDW 20
+#define LLDL 21
+#define LDW_INC 22
+#define LDL_INC 23
+#define LDW_DEC 24
+#define LDL_DEC 25
+#define LDW_SET 26
+#define LDL_SET 27
+#define LSTW 28
+#define LSTL 29
+#define LDW_NC 30
+#define LDL_NC 31
+#define LDD_NC 32
+#define STW_NC 33
+#define STL_NC 34
+#define STD_NC 35
+#define LDWE 36
+#define LDSE 37
+#define LDDE 38
+#define VLDS 39
+#define VLDD 40
+#define VSTS 41
+#define VSTD 42
+#define FIMOVS 43
+#define FIMOVD 44
+#define ADDW 45
+#define SUBW 46
+#define S4ADDW 47
+#define S4SUBW 48
+#define S8ADDW 49
+#define S8SUBW 50
+#define ADDL 51
+#define SUBL 52
+#define S4ADDL 53
+#define S4SUBL 54
+#define S8ADDL 55
+#define S8SUBL 56
+#define MULW 57
+#define DIVW 58
+#define UDIVW 59
+#define REMW 60
+#define UREMW 61
+#define MULL 62
+#define MULH 63
+#define DIVL 64
+#define UDIVL 65
+#define REML 66
+#define UREML 67
+#define ADDPI 68
+#define ADDPIS 69
+#define CMPEQ 70
+#define CMPLT 71
+#define CMPLE 72
+#define CMPULT 73
+#define CMPULE 74
+#define SBT 75
+#define CBT 76
+#define AND 77
+#define BIC 78
+#define BIS 79
+#define ORNOT 80
+#define XOR 81
+#define EQV 82
+#define INSLB 83
+#define INSLH 84
+#define INSLW 85
+#define INSLL 86
+#define INSHB 87
+#define INSHH 88
+#define INSHW 89
+#define INSHL 90
+#define SLLL 91
+#define SRLL 92
+#define SRAL 93
+#define ROLL 94
+#define SLLW 95
+#define SRLW 96
+#define SRAW 97
+#define ROLW 98
+#define EXTLB 99
+#define EXTLH 100
+#define EXTLW 101
+#define EXTLL 102
+#define EXTHB 103
+#define EXTHH 104
+#define EXTHW 105
+#define EXTHL 106
+#define CTPOP 107
+#define CTLZ 108
+#define CTTZ 109
+#define REVBH 110
+#define REVBW 111
+#define REVBL 112
+#define CASW 113
+#define CASL 114
+#define MASKLB 115
+#define MASKLH 116
+#define MASKLW 117
+#define MASKLL 118
+#define MASKHB 119
+#define MASKHH 120
+#define MASKHW 121
+#define MASKHL 122
+#define ZAP 123
+#define ZAPNOT 124
+#define SEXTB 125
+#define SEXTH 126
+#define SELEQ 127
+#define SELGE 128
+#define SELGT 129
+#define SELLE 130
+#define SELLT 131
+#define SELNE 132
+#define SELLBC 133
+#define SELLBS 134
+#define ADDWI 135
+#define SUBWI 136
+#define S4ADDWI 137
+#define S4SUBWI 138
+#define S8ADDWI 139
+#define S8SUBWI 140
+#define ADDLI 141
+#define SUBLI 142
+#define S4ADDLI 143
+#define S4SUBLI 144
+#define S8ADDLI 145
+#define S8SUBLI 146
+#define MULWI 147
+#define DIVWI 148
+#define UDIVWI 149
+#define REMWI 150
+#define UREMWI 151
+#define MULLI 152
+#define MULHI 153
+#define DIVLI 154
+#define UDIVLI 155
+#define REMLI 156
+#define UREMLI 157
+#define ADDPII 158
+#define ADDPISI 159
+#define CMPEQI 160
+#define CMPLTI 161
+#define CMPLEI 162
+#define CMPULTI 163
+#define CMPULEI 164
+#define SBTI 165
+#define CBTI 166
+#define ANDI 167
+#define BICI 168
+#define BISI 169
+#define ORNOTI 170
+#define XORI 171
+#define EQVI 172
+#define INSLBI 173
+#define INSLHI 174
+#define INSLWI 175
+#define INSLLI 176
+#define INSHBI 177
+#define INSHHI 178
+#define INSHWI 179
+#define INSHLI 180
+#define SLLLI 181
+#define SRLLI 182
+#define SRALI 183
+#define ROLLI 184
+#define SLLWI 185
+#define SRLWI 186
+#define SRAWI 187
+#define ROLWI 188
+#define EXTLBI 189
+#define EXTLHI 190
+#define EXTLWI 191
+#define EXTLLI 192
+#define EXTHBI 193
+#define EXTHHI 194
+#define EXTHWI 195
+#define EXTHLI 196
+#define CTPOPI 197
+#define CTLZI 198
+#define CTTZI 199
+#define REVBHI 200
+#define REVBWI 201
+#define REVBLI 202
+#define CASWI 203
+#define CASLI 204
+#define MASKLBI 205
+#define MASKLHI 206
+#define MASKLWI 207
+#define MASKLLI 208
+#define MASKHBI 209
+#define MASKHHI 210
+#define MASKHWI 211
+#define MASKHLI 212
+#define ZAPI 213
+#define ZAPNOTI 214
+#define SEXTBI 215
+#define SEXTHI 216
+#define CMPGEBI 217
+#define SELEQI 218
+#define SELGEI 219
+#define SELGTI 220
+#define SELLEI 221
+#define SELLTI 222
+#define SELNEI 223
+#define SELLBCI 224
+#define SELLBSI 225
+#define VLOGZZ 226
+#define FADDS 227
+#define FADDD 228
+#define FSUBS 229
+#define FSUBD 230
+#define FMULS 231
+#define FMULD 232
+#define FDIVS 233
+#define FDIVD 234
+#define FSQRTS 235
+#define FSQRTD 236
+#define FCMPEQ 237
+#define FCMPLE 238
+#define FCMPLT 239
+#define FCMPUN 240
+#define FCVTSD 241
+#define FCVTDS 242
+#define FCVTDL_G 243
+#define FCVTDL_P 244
+#define FCVTDL_Z 245
+#define FCVTDL_N 246
+#define FCVTDL 247
+#define FCVTWL 248
+#define FCVTLW 249
+#define FCVTLS 250
+#define FCVTLD 251
+#define FCPYS 252
+#define FCPYSE 253
+#define FCPYSN 254
+#define IFMOVS 255
+#define IFMOVD 256
+#define RFPCR 257
+#define WFPCR 258
+#define SETFPEC0 259
+#define SETFPEC1 260
+#define SETFPEC2 261
+#define SETFPEC3 262
+#define FRECS 263
+#define FRECD 264
+#define FRIS 265
+#define FRIS_G 266
+#define FRIS_P 267
+#define FRIS_Z 268
+#define FRIS_N 269
+#define FRID 270
+#define FRID_G 271
+#define FRID_P 272
+#define FRID_Z 273
+#define FRID_N 274
+#define FMAS 275
+#define FMAD 276
+#define FMSS 277
+#define FMSD 278
+#define FNMAS 279
+#define FNMAD 280
+#define FNMSS 281
+#define FNMSD 282
+#define FSELEQ 283
+#define FSELNE 284
+#define FSELLT 285
+#define FSELLE 286
+#define FSELGT 287
+#define FSELGE 288
+#define VADDW 289
+#define VADDWI 290
+#define VSUBW 291
+#define VSUBWI 292
+#define VCMPGEW 293
+#define VCMPGEWI 294
+#define VCMPEQW 295
+#define VCMPEQWI 296
+#define VCMPLEW 297
+#define VCMPLEWI 298
+#define VCMPLTW 299
+#define VCMPLTWI 300
+#define VCMPULEW 301
+#define VCMPULEWI 302
+#define VCMPULTW 303
+#define VCMPULTWI 304
+#define VSLLW 305
+#define VSLLWI 306
+#define VSRLW 307
+#define VSRLWI 308
+#define VSRAW 309
+#define VSRAWI 310
+#define VROLW 311
+#define VROLWI 312
+#define SLLOW 313
+#define SLLOWI 314
+#define SRLOW 315
+#define SRLOWI 316
+#define VADDL 317
+#define VADDLI 318
+#define VSUBL 319
+#define VSUBLI 320
+#define VSLLB 321
+#define VSLLBI 322
+#define VSRLB 323
+#define VSRLBI 324
+#define VSRAB 325
+#define VSRABI 326
+#define VROLB 327
+#define VROLBI 328
+#define VSLLH 329
+#define VSLLHI 330
+#define VSRLH 331
+#define VSRLHI 332
+#define VSRAH 333
+#define VSRAHI 334
+#define VROLH 335
+#define VROLHI 336
+#define CTPOPOW 337
+#define CTLZOW 338
+#define VSLLL 339
+#define VSLLLI 340
+#define VSRLL 341
+#define VSRLLI 342
+#define VSRAL 343
+#define VSRALI 344
+#define VROLL 345
+#define VROLLI 346
+#define VMAXB 347
+#define VMINB 348
+#define VUCADDW 349
+#define VUCADDWI 350
+#define VUCSUBW 351
+#define VUCSUBWI 352
+#define VUCADDH 353
+#define VUCADDHI 354
+#define VUCSUBH 355
+#define VUCSUBHI 356
+#define VUCADDB 357
+#define VUCADDBI 358
+#define VUCSUBB 359
+#define VUCSUBBI 360
+#define SRAOW 361
+#define SRAOWI 362
+#define VSUMW 363
+#define VSUML 364
+#define VSM4R 365
+#define VBINVW 366
+#define VCMPUEQB 367
+#define VCMPUGTB 368
+#define VCMPUGTBI 369
+#define VSM3MSW 370
+#define VMAXH 371
+#define VMINH 372
+#define VMAXW 373
+#define VMINW 374
+#define VMAXL 375
+#define VMINL 376
+#define VUMAXB 377
+#define VUMINB 378
+#define VUMAXH 379
+#define VUMINH 380
+#define VUMAXW 381
+#define VUMINW 382
+#define VUMAXL 383
+#define VUMINL 384
+#define VSM4KEY 385
+#define VADDS 386
+#define VADDD 387
+#define VSUBS 388
+#define VSUBD 389
+#define VMULS 390
+#define VMULD 391
+#define VDIVS 392
+#define VDIVD 393
+#define VSQRTS 394
+#define VSQRTD 395
+#define VFCMPEQ 396
+#define VFCMPLE 397
+#define VFCMPLT 398
+#define VFCMPUN 399
+#define VCPYS 400
+#define VCPYSE 401
+#define VCPYSN 402
+#define VSUMS 403
+#define VSUMD 404
+#define VFCVTSD 405
+#define VFCVTDS 406
+#define VFCVTLS 407
+#define VFCVTLD 408
+#define VFCVTDL 409
+#define VFCVTDL_G 410
+#define VFCVTDL_P 411
+#define VFCVTDL_Z 412
+#define VFCVTDL_N 413
+#define VFRIS 414
+#define VFRIS_G 415
+#define VFRIS_P 416
+#define VFRIS_Z 417
+#define VFRIS_N 418
+#define VFRID 419
+#define VFRID_G 420
+#define VFRID_P 421
+#define VFRID_Z 422
+#define VFRID_N 423
+#define VFRECS 424
+#define VFRECD 425
+#define VMAXS 426
+#define VMINS 427
+#define VMAXD 428
+#define VMIND 429
+#define VMAS 430
+#define VMAD 431
+#define VMSS 432
+#define VMSD 433
+#define VNMAS 434
+#define VNMAD 435
+#define VNMSS 436
+#define VNMSD 437
+#define VFSELEQ 438
+#define VFSELLT 439
+#define VFSELLE 440
+#define VSELEQW 441
+#define VSELEQWI 442
+#define VSELLBCW 443
+#define VSELLBCWI 444
+#define VSELLTW 445
+#define VSELLTWI 446
+#define VSELLEW 447
+#define VSELLEWI 448
+#define VINSW 449
+#define VINSF 450
+#define VEXTW 451
+#define VEXTF 452
+#define VCPYW 453
+#define VCPYF 454
+#define VCONW 455
+#define VSHFW 456
+#define VCONS 457
+#define VCOND 458
+#define VINSB 459
+#define VINSH 460
+#define VINSECTLH 461
+#define VINSECTLW 462
+#define VINSECTLL 463
+#define VINSECTLB 464
+#define VSHFQ 465
+#define VSHFQB 466
+#define VCPYB 467
+#define VCPYH 468
+#define VSM3R 469
+#define VFCVTSH 470
+#define VFCVTHS 471
+#define VLDW_U 472
+#define VSTW_U 473
+#define VLDS_U 474
+#define VSTS_U 475
+#define VLDD_U 476
+#define VSTD_U 477
+#define VSTW_UL 478
+#define VSTW_UH 479
+#define VSTS_UL 480
+#define VSTS_UH 481
+#define VSTD_UL 482
+#define VSTD_UH 483
+#define VLDD_NC 484
+#define VSTD_NC 485
+#define LBR 486
+#define LDBU_A 487
+#define LDHU_A 488
+#define LDW_A 489
+#define LDL_A 490
+#define FLDS_A 491
+#define FLDD_A 492
+#define STBU_A 493
+#define STHU_A 494
+#define STW_A 495
+#define STL_A 496
+#define FSTS_A 497
+#define FSTD_A 498
+#define DPFHR 499
+#define DPFHW 500
+#define LDBU 501
+#define LDHU 502
+#define LDW 503
+#define LDL 504
+#define LDL_U 505
+#define PRI_LDL 506
+#define PRI_LDW 507
+#define FLDS 508
+#define FLDD 509
+#define STB 510
+#define STH 511
+#define STW 512
+#define STL 513
+#define STL_U 514
+#define PRI_STL 515
+#define PRI_STW 516
+#define FSTS 517
+#define FSTD 518
+#define BEQ 519
+#define BNE 520
+#define BLT 521
+#define BLE 522
+#define BGT 523
+#define BGE 524
+#define BLBC 525
+#define BLBS 526
+#define FBEQ 527
+#define FBNE 528
+#define FBLT 529
+#define FBLE 530
+#define FBGT 531
+#define FBGE 532
+#define LDIH 533
+#define LDI 534
+
+extern const char *insn_opc[535];
+
+#endif
diff --git a/target/sw64/simd_helper.c b/target/sw64/simd_helper.c
new file mode 100644
index 0000000000..13bd52de3d
--- /dev/null
+++ b/target/sw64/simd_helper.c
@@ -0,0 +1,1058 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#undef DEBUG_SIMD
+
+static inline uint8_t *get_element_b(CPUSW64State *env, uint64_t ra,
+                                     int index) /* byte lane 0..31 of a 256-bit vreg */
+{
+    return (uint8_t*)&env->fr[ra + (index / 8) * 32] + (index % 8); /* fr[] banks 64-bit chunks at stride 32 */
+}
+
+static inline uint16_t *get_element_h(CPUSW64State *env, uint64_t ra,
+                                      int index) /* 16-bit lane 0..15 of a 256-bit vreg */
+{
+    return (uint16_t*)&env->fr[ra + (index / 4) * 32] + (index % 4); /* 4 halfwords per 64-bit chunk */
+}
+
+static inline uint32_t *get_element_w(CPUSW64State *env, uint64_t ra,
+                                      int index) /* 32-bit lane 0..7 of a 256-bit vreg */
+{
+    return (uint32_t*)&env->fr[ra + (index / 2) * 32] + (index % 2); /* 2 words per 64-bit chunk */
+}
+
+static inline uint64_t *get_element_l(CPUSW64State *env, uint64_t ra,
+                                      int index) /* 64-bit lane 0..3 of a 256-bit vreg */
+{
+    return &env->fr[ra + index * 32]; /* each lane lives in a separate fr[] bank of 32 */
+}
+
+void helper_srlow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift)
+{ /* SRLOW: 256-bit (4 x 64) logical right shift of vreg ra into rc */
+    int i;
+    int adden;                  /* number of whole 64-bit words shifted out */
+    int dest, src;
+    adden = shift >> 6;
+    shift &= 0x3f;              /* residual bit shift, 0..63 */
+#ifdef DEBUG_SIMD
+    printf("right shift = %ld adden = %d\n", shift, adden);
+    printf("in_fr[%ld]:", ra);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[ra + 32 * i]);
+    }
+    printf("\n");
+#endif
+
+    for (i = 0; (i + adden) < 4; i++) {
+        dest = i * 32 + rc;
+        src = (i + adden) * 32 + ra;
+        env->fr[dest] = env->fr[src] >> shift;
+        if (((i + adden) < 3) && (shift != 0)) /* pull carry bits from next word; guard avoids UB shift by 64 */
+            env->fr[dest] |= (env->fr[src + 32] << (64 - shift));
+    }
+
+    for (; i < 4; i++) {        /* zero-fill the vacated high words */
+        env->fr[rc + i * 32] = 0;
+    }
+#ifdef DEBUG_SIMD
+    printf("out_fr[%ld]:", rc);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[rc + 32 * i]);
+    }
+    printf("\n");
+#endif
+}
+
+void helper_sllow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift)
+{ /* SLLOW: 256-bit (4 x 64) logical left shift of vreg ra into rc */
+    int i;
+    int adden;                  /* number of whole 64-bit words shifted in */
+    int dest, src;
+    adden = shift >> 6;
+    shift &= 0x3f;              /* residual bit shift, 0..63 */
+#ifdef DEBUG_SIMD
+    printf("left shift = %ld adden = %d\n", shift, adden);
+    printf("in_fr[%ld]:", ra);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[ra + 32 * i]);
+    }
+    printf("\n");
+#endif
+
+    for (i = 3; (i - adden) >= 0; i--) {
+        dest = i * 32 + rc;
+        src = (i - adden) * 32 + ra;
+        env->fr[dest] = env->fr[src] << shift;
+        if (((i - adden) > 0) && (shift != 0)) /* pull carry bits from lower word; guard avoids UB shift by 64 */
+            env->fr[dest] |= (env->fr[src - 32] >> (64 - shift));
+    }
+    for (; i >= 0; i--) {       /* zero-fill the vacated low words */
+        env->fr[rc + i * 32] = 0;
+    }
+#ifdef DEBUG_SIMD
+    printf("out_fr[%ld]:", rc);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[rc + 32 * i]);
+    }
+    printf("\n");
+#endif
+}
+
+static uint64_t do_logzz(uint64_t va, uint64_t vb, uint64_t vc, uint64_t zz)
+{ /* bitwise ternary op: zz is an 8-entry truth table indexed by (a,b,c) bit triples */
+    int i;
+    uint64_t ret = 0;
+    int index;
+
+    for (i = 0; i < 64; i++) {
+        index = (((va >> i) & 1) << 2) | (((vb >> i) & 1) << 1) | ((vc >> i) & 1);
+        ret |= ((zz >> index) & 1) << i;
+    }
+
+    return ret;
+}
+
+void helper_vlogzz(CPUSW64State *env, uint64_t args, uint64_t rd, uint64_t zz)
+{ /* VLOGZZ: apply the zz truth table lane-wise; args packs ra/rb/rc as bytes */
+    int i;
+    int ra, rb, rc;
+    ra = args >> 16;
+    rb = (args >> 8) & 0xff;
+    rc = args & 0xff;
+#ifdef DEBUG_SIMD
+    printf("zz = %lx\n", zz);
+    printf("in_fr[%d]:", ra);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[ra + 32 * i]);
+    }
+    printf("\n");
+    printf("in_fr[%d]:", rb);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[rb + 32 * i]);
+    }
+    printf("\n");
+    printf("in_fr[%d]:", rc);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[rc + 32 * i]);
+    }
+    printf("\n");
+#endif
+    for (i = 0; i < 4; i++) {   /* one do_logzz per 64-bit chunk of the 256-bit regs */
+        env->fr[rd + i * 32] = do_logzz(env->fr[ra + i * 32], env->fr[rb + i * 32],
+                                        env->fr[rc + i * 32], zz);
+    }
+#ifdef DEBUG_SIMD
+    printf("out_fr[%ld]:", rd);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[rd + 32 * i]);
+    }
+    printf("\n");
+#endif
+}
+
+void helper_v_print(CPUSW64State *env, uint64_t v)
+{ /* debug-only trace helper; GETPC() is the host return address, v the traced value */
+    printf("PC[%lx]: fr[%lx]:\n", GETPC(), v);
+}
+
+void helper_vconw(CPUSW64State *env, uint64_t args, uint64_t rd,
+                  uint64_t byte4_len) /* VCONW: concatenate ra:rb, take 8 words starting at lane byte4_len */
+{
+    int ra, rb;
+    int count;
+    int i;
+    uint32_t *ptr_dst, *ptr_src;
+    uint32_t tmp[8];
+
+    ra = (args >> 8) & 0xff;
+    rb = args & 0xff;
+    count = 8 - byte4_len;      /* lanes still taken from ra */
+
+    for (i = 0; i < 8; i++) {
+        ptr_dst = get_element_w(env, rd, i); /* NOTE(review): dead store, ptr_dst unused in this loop */
+        if (i < count) {
+            ptr_src = get_element_w(env, ra, i + byte4_len);
+        } else {
+            ptr_src = get_element_w(env, rb, i - count);
+        }
+        tmp[i] = *ptr_src;      /* staged in tmp so rd may alias ra/rb */
+    }
+    for (i = 0; i < 8; i++) {
+        ptr_dst = get_element_w(env, rd, i);
+        *ptr_dst = tmp[i];
+    }
+}
+
+void helper_vcond(CPUSW64State *env, uint64_t args, uint64_t rd,
+                  uint64_t byte8_len) /* VCOND: concatenate ra:rb, take 4 longs starting at lane byte8_len */
+{
+    int ra, rb;
+    int count;
+    int i;
+    uint64_t *ptr_dst, *ptr_src;
+    uint64_t tmp[8];            /* only tmp[0..3] used */
+
+    ra = (args >> 8) & 0xff;
+    rb = args & 0xff;
+    count = 4 - byte8_len;      /* lanes still taken from ra */
+
+    for (i = 0; i < 4; i++) {
+        if (i < count) {
+            ptr_src = get_element_l(env, ra, i + byte8_len);
+        } else {
+            ptr_src = get_element_l(env, rb, i - count);
+        }
+        tmp[i] = *ptr_src;      /* staged in tmp so rd may alias ra/rb */
+    }
+    for (i = 0; i < 4; i++) {
+        ptr_dst = get_element_l(env, rd, i); /* BUG FIX: was lane i + byte8_len, which for byte8_len >= 1 stored past lane 3 (out of env->fr[] bounds); matches helper_vconw */
+        *ptr_dst = tmp[i];
+    }
+}
+
+void helper_vshfw(CPUSW64State *env, uint64_t args, uint64_t rd, uint64_t vc)
+{ /* VSHFW: per-lane shuffle; each nibble of vc selects lane 0..7 (bit 3 picks rb over ra) */
+    int ra, rb;
+    int i;
+    uint32_t *ptr_dst, *ptr_src;
+    uint32_t tmp[8];
+    int flag, idx;
+
+    ra = (args >> 8) & 0xff;
+    rb = args & 0xff;
+
+    for (i = 0; i < 8; i++) {
+        flag = (vc >> (i * 4)) & 0x8;  /* source register select */
+        idx = (vc >> (i * 4)) & 0x7;   /* source lane select */
+        if (flag == 0) {
+            ptr_src = get_element_w(env, ra, idx);
+        } else {
+            ptr_src = get_element_w(env, rb, idx);
+        }
+        tmp[i] = *ptr_src;      /* staged so rd may alias ra/rb */
+    }
+    for (i = 0; i < 8; i++) {
+        ptr_dst = get_element_w(env, rd, i);
+        *ptr_dst = tmp[i];
+    }
+}
+
+uint64_t helper_ctlzow(CPUSW64State *env, uint64_t ra)
+{ /* CTLZOW: count leading zero bits of the 256-bit value in vreg ra */
+    int i, j;
+    uint64_t val;
+    uint64_t ctlz = 0;
+
+    for (j = 3; j >= 0; j--) {  /* scan from most-significant 64-bit chunk down */
+        val = env->fr[ra + 32 * j];
+        for (i = 63; i >= 0; i--) {
+            if ((val >> i) & 1)
+                return ctlz << 29; /* <<29 positions count in the FP register image — presumably the result format; verify against ISA */
+            else
+                ctlz++;
+        }
+    }
+    return ctlz << 29;          /* all 256 bits were zero */
+}
+
+void helper_vucaddw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-32-bit-lane saturating add, clamped to INT32_MIN/INT32_MAX */
+    int a, b, c;
+    int ret;
+    int i;
+
+    for (i = 0; i < 4; i++) {
+        a = (int)(env->fr[ra + i * 32] & 0xffffffff); /* low word of chunk */
+        b = (int)(env->fr[rb + i * 32] & 0xffffffff);
+        c = a + b; /* NOTE(review): relies on wrapping signed overflow (UB in ISO C; QEMU builds with -fwrapv) — confirm */
+        if ((c ^ a) < 0 && (c ^ b) < 0) { /* overflow: result sign differs from both operands */
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        ret = c;
+
+        a = (int)(env->fr[ra + i * 32] >> 32); /* high word of chunk */
+        b = (int)(env->fr[rb + i * 32] >> 32);
+        c = a + b;
+        if ((c ^ a) < 0 && (c ^ b) < 0) {
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) |
+                               (uint64_t)(uint32_t)ret;
+    }
+}
+
+void helper_vucaddwi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* per-32-bit-lane saturating add of immediate vb */
+    int a, b, c;
+    int ret;
+    int i;
+
+    b = (int)vb;                /* broadcast immediate to every lane */
+    for (i = 0; i < 4; i++) {
+        a = (int)(env->fr[ra + i * 32] & 0xffffffff);
+        c = a + b;
+        if ((c ^ a) < 0 && (c ^ b) < 0) { /* overflow: result sign differs from both operands */
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        ret = c;
+
+        a = (int)(env->fr[ra + i * 32] >> 32);
+        c = a + b;
+        if ((c ^ a) < 0 && (c ^ b) < 0) {
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) |
+                               (uint64_t)(uint32_t)ret;
+    }
+}
+
+void helper_vucsubw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-32-bit-lane saturating subtract, clamped to INT32_MIN/INT32_MAX */
+    int a, b, c;
+    int ret;
+    int i;
+
+    for (i = 0; i < 4; i++) {
+        a = (int)(env->fr[ra + i * 32] & 0xffffffff);
+        b = (int)(env->fr[rb + i * 32] & 0xffffffff);
+        c = a - b;
+        if ((b ^ a) < 0 && (c ^ a) < 0) { /* overflow: operands differ in sign and result flipped */
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        ret = c;
+
+        a = (int)(env->fr[ra + i * 32] >> 32);
+        b = (int)(env->fr[rb + i * 32] >> 32);
+        c = a - b;
+        if ((b ^ a) < 0 && (c ^ a) < 0) {
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) |
+                               (uint64_t)(uint32_t)ret;
+    }
+}
+
+void helper_vucsubwi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* per-32-bit-lane saturating subtract of immediate vb */
+    int a, b, c;
+    int ret;
+    int i;
+
+    b = (int)vb;                /* broadcast immediate to every lane */
+    for (i = 0; i < 4; i++) {
+        a = (int)(env->fr[ra + i * 32] & 0xffffffff);
+        c = a - b;
+        if ((b ^ a) < 0 && (c ^ a) < 0) { /* overflow: operands differ in sign and result flipped */
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        ret = c;
+
+        a = (int)(env->fr[ra + i * 32] >> 32);
+        c = a - b;
+        if ((b ^ a) < 0 && (c ^ a) < 0) {
+            if (a < 0)
+                c = 0x80000000;
+            else
+                c = 0x7fffffff;
+        }
+        env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) |
+                               (uint64_t)(uint32_t)ret;
+    }
+}
+
+void helper_vucaddh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-16-bit-lane saturating add, clamped to INT16_MIN/INT16_MAX */
+    short a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    for (i = 0; i < 4; i++) {   /* 4 chunks x 4 halfword lanes */
+        ret = 0;
+        for (j = 0; j < 4; j++) {
+            a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff);
+            b = (short)((env->fr[rb + i * 32] >> (j * 16)) & 0xffff);
+            c = a + b;
+            if ((c ^ a) < 0 && (c ^ b) < 0) { /* result sign differs from both operands */
+                if (a < 0)
+                    c = 0x8000;
+                else
+                    c = 0x7fff;
+            }
+            ret |= ((uint64_t)(uint16_t)c) << (j * 16);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucaddhi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* per-16-bit-lane saturating add of immediate vb */
+    short a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    b = (short)vb;              /* broadcast immediate to every lane */
+    for (i = 0; i < 4; i++) {
+        ret = 0;
+        for (j = 0; j < 4; j++) {
+            a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff);
+            c = a + b;
+            if ((c ^ a) < 0 && (c ^ b) < 0) { /* result sign differs from both operands */
+                if (a < 0)
+                    c = 0x8000;
+                else
+                    c = 0x7fff;
+            }
+            ret |= ((uint64_t)(uint16_t)c) << (j * 16);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucsubh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-16-bit-lane saturating subtract, clamped to INT16_MIN/INT16_MAX */
+    short a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    for (i = 0; i < 4; i++) {
+        ret = 0;
+        for (j = 0; j < 4; j++) {
+            a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff);
+            b = (short)((env->fr[rb + i * 32] >> (j * 16)) & 0xffff);
+            c = a - b;
+            if ((b ^ a) < 0 && (c ^ a) < 0) { /* operands differ in sign and result flipped */
+                if (a < 0)
+                    c = 0x8000;
+                else
+                    c = 0x7fff;
+            }
+            ret |= ((uint64_t)(uint16_t)c) << (j * 16);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucsubhi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* per-16-bit-lane saturating subtract of immediate vb */
+    short a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    b = (short)vb;              /* broadcast immediate to every lane */
+    for (i = 0; i < 4; i++) {
+        ret = 0;
+        for (j = 0; j < 4; j++) {
+            a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff);
+            c = a - b;
+            if ((b ^ a) < 0 && (c ^ a) < 0) { /* operands differ in sign and result flipped */
+                if (a < 0)
+                    c = 0x8000;
+                else
+                    c = 0x7fff;
+            }
+            ret |= ((uint64_t)(uint16_t)c) << (j * 16);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucaddb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-8-bit-lane saturating add, clamped to INT8_MIN/INT8_MAX */
+    int8_t a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    for (i = 0; i < 4; i++) {   /* 4 chunks x 8 byte lanes */
+        ret = 0;
+        for (j = 0; j < 8; j++) {
+            a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff);
+            b = (int8_t)((env->fr[rb + i * 32] >> (j * 8)) & 0xff);
+            c = a + b;
+            if ((c ^ a) < 0 && (c ^ b) < 0) { /* result sign differs from both operands */
+                if (a < 0)
+                    c = 0x80;
+                else
+                    c = 0x7f;
+            }
+            ret |= ((uint64_t)(uint8_t)c) << (j * 8);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucaddbi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* per-8-bit-lane saturating add of immediate vb */
+    int8_t a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    b = (int8_t)(vb & 0xff);    /* broadcast immediate to every lane */
+    for (i = 0; i < 4; i++) {
+        ret = 0;
+        for (j = 0; j < 8; j++) {
+            a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff);
+            c = a + b;
+            if ((c ^ a) < 0 && (c ^ b) < 0) { /* result sign differs from both operands */
+                if (a < 0)
+                    c = 0x80;
+                else
+                    c = 0x7f;
+            }
+            ret |= ((uint64_t)(uint8_t)c) << (j * 8);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucsubb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-8-bit-lane saturating subtract, clamped to INT8_MIN/INT8_MAX */
+    int8_t a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    for (i = 0; i < 4; i++) {
+        ret = 0;
+        for (j = 0; j < 8; j++) {
+            a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff);
+            b = (int8_t)((env->fr[rb + i * 32] >> (j * 8)) & 0xff);
+            c = a - b;
+            if ((b ^ a) < 0 && (c ^ a) < 0) { /* operands differ in sign and result flipped */
+                if (a < 0)
+                    c = 0x80;
+                else
+                    c = 0x7f;
+            }
+            ret |= ((uint64_t)(uint8_t)c) << (j * 8);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+void helper_vucsubbi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* per-8-bit-lane saturating subtract of immediate vb */
+    int8_t a, b, c;
+    uint64_t ret;
+    int i, j;
+
+    b = (int8_t)(vb & 0xff);    /* broadcast immediate to every lane */
+    for (i = 0; i < 4; i++) {
+        ret = 0;
+        for (j = 0; j < 8; j++) {
+            a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); /* FIX: mask was 0xffff; byte lanes use 0xff like every sibling helper (same value after the int8_t cast, but misleading) */
+            c = a - b;
+            if ((b ^ a) < 0 && (c ^ a) < 0) { /* operands differ in sign and result flipped */
+                if (a < 0)
+                    c = 0x80;
+                else
+                    c = 0x7f;
+            }
+            ret |= ((uint64_t)(uint8_t)c) << (j * 8);
+        }
+        env->fr[rc + i * 32] = ret;
+    }
+}
+
+uint64_t helper_vstw(CPUSW64State *env, uint64_t t0, uint64_t t1)
+{ /* fetch 32-bit lane t1 (0..7) of vreg t0 for a vector-word store */
+    uint64_t idx, shift;
+
+    idx = t0 + (t1 / 2) * 32;   /* fr[] bank holding the lane */
+    shift = (t1 % 2) * 32;      /* low or high word of the chunk */
+
+    return (env->fr[idx] >> shift) & 0xffffffffUL;
+}
+
+uint64_t helper_vsts(CPUSW64State *env, uint64_t t0, uint64_t t1)
+{ /* fetch 64-bit lane t1 and repack to the 32-bit S-float memory image — presumably D->S register format conversion; verify against ISA */
+    uint64_t idx, val;
+
+    idx = t0 + t1 * 32;
+    val = env->fr[idx];
+
+    return ((val >> 32) & 0xc0000000) | ((val >> 29) & 0x3fffffff);
+}
+
+uint64_t helper_vstd(CPUSW64State *env, uint64_t t0, uint64_t t1)
+{ /* fetch 64-bit lane t1 (0..3) of vreg t0 for a vector-double store */
+    uint64_t idx;
+
+    idx = t0 + t1 * 32;
+    return env->fr[idx];
+}
+
+#define HELPER_VMAX(name, _suffix, type, loop) /* lane-wise max over `loop` lanes of `type` */ \
+    void glue(glue(helper_, name), _suffix)(CPUSW64State *env, uint64_t ra, \
+                                            uint64_t rb, uint64_t rc) \
+    { \
+        int i; \
+        type *ptr_dst, *ptr_src_a, *ptr_src_b; \
+        \
+        for (i = 0; i < loop; i++) { \
+            ptr_dst = (type*)glue(get_element_, _suffix)(env, rc, i); \
+            ptr_src_a = (type*)glue(get_element_, _suffix)(env, ra, i); \
+            ptr_src_b = (type*)glue(get_element_, _suffix)(env, rb, i); \
+            \
+            if (*ptr_src_a >= *ptr_src_b) { \
+                *ptr_dst = *ptr_src_a; \
+            } else { \
+                *ptr_dst = *ptr_src_b; \
+            } \
+        } \
+    }
+
+#define HELPER_VMIN(name, _suffix, type, loop) /* lane-wise min over `loop` lanes of `type` */ \
+    void glue(glue(helper_, name), _suffix)(CPUSW64State *env, uint64_t ra, \
+                                            uint64_t rb, uint64_t rc) \
+    { \
+        int i; \
+        type *ptr_dst, *ptr_src_a, *ptr_src_b; \
+        \
+        for (i = 0; i < loop; i++) { \
+            ptr_dst = (type*)glue(get_element_, _suffix)(env, rc, i); \
+            ptr_src_a = (type*)glue(get_element_, _suffix)(env, ra, i); \
+            ptr_src_b = (type*)glue(get_element_, _suffix)(env, rb, i); \
+            \
+            if (*ptr_src_a <= *ptr_src_b) { \
+                *ptr_dst = *ptr_src_a; \
+            } else { \
+                *ptr_dst = *ptr_src_b; \
+            } \
+        } \
+    }
+
+HELPER_VMAX(vmax, b, int8_t, 32)    /* signed byte lanes */
+HELPER_VMIN(vmin, b, int8_t, 32)
+HELPER_VMAX(vmax, h, int16_t, 16)   /* signed halfword lanes */
+HELPER_VMIN(vmin, h, int16_t, 16)
+HELPER_VMAX(vmax, w, int32_t, 8)    /* signed word lanes */
+HELPER_VMIN(vmin, w, int32_t, 8)
+HELPER_VMAX(vumax, b, uint8_t, 32)  /* unsigned variants below */
+HELPER_VMIN(vumin, b, uint8_t, 32)
+HELPER_VMAX(vumax, h, uint16_t, 16)
+HELPER_VMIN(vumin, h, uint16_t, 16)
+HELPER_VMAX(vumax, w, uint32_t, 8)
+HELPER_VMIN(vumin, w, uint32_t, 8)
+
+void helper_sraow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift)
+{ /* SRAOW: 256-bit (4 x 64) arithmetic right shift of vreg ra into rc */
+    int i;
+    int adden;                  /* number of whole 64-bit words shifted out */
+    int dest, src;
+    uint64_t sign;
+    adden = shift >> 6;
+    shift &= 0x3f;              /* residual bit shift, 0..63 */
+    sign = (uint64_t)((int64_t)env->fr[ra + 96] >> 63); /* replicate sign bit of the top chunk (lane 3) */
+#ifdef DEBUG_SIMD
+    printf("right shift = %ld adden = %d\n", shift, adden);
+    printf("in_fr[%ld]:", ra);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[ra + 32 * i]);
+    }
+    printf("\n");
+#endif
+
+    for (i = 0; (i + adden) < 4; i++) {
+        dest = i * 32 + rc;
+        src = (i + adden) * 32 + ra;
+        env->fr[dest] = env->fr[src] >> shift;
+        if (shift != 0) {       /* guard avoids UB shift by 64 */
+            if (((i + adden) < 3))
+                env->fr[dest] |= (env->fr[src + 32] << (64 - shift));
+            else
+                env->fr[dest] |= (sign << (64 - shift)); /* top word carries in sign bits */
+        }
+    }
+
+    for (; i < 4; i++) {        /* sign-fill the vacated high words */
+        env->fr[rc + i * 32] = sign;
+    }
+#ifdef DEBUG_SIMD
+    printf("out_fr[%ld]:", rc);
+    for (i = 3 ; i >= 0; i--) {
+        printf("%016lx ", env->fr[rc + 32 * i]);
+    }
+    printf("\n");
+#endif
+}
+
+static uint16_t sm4_sbox[16][16] = { /* SM4 S-box (GB/T 32907), indexed [high nibble][low nibble] */
+    { 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05 },
+    { 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99 },
+    { 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62 },
+    { 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6 },
+    { 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8 },
+    { 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35 },
+    { 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87 },
+    { 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e },
+    { 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1 },
+    { 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3 },
+    { 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f },
+    { 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51 },
+    { 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8 },
+    { 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0 },
+    { 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84 },
+    { 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 }
+};
+
+static uint32_t SBOX(uint32_t val)
+{ /* SM4 tau transform: substitute each of the 4 bytes through sm4_sbox */
+    int ret = 0;
+    int i;
+    int idx_x, idx_y;
+    for (i = 0; i < 4; i++) {
+        idx_x = (val >> (i * 8)) & 0xff;
+        idx_y = idx_x & 0xf;    /* low nibble -> column */
+        idx_x = idx_x >> 4;     /* high nibble -> row */
+
+        ret |= (sm4_sbox[idx_x][idx_y] << (i * 8));
+    }
+    return ret;
+}
+
+static uint32_t rotl(uint32_t val, int shift)
+{ /* 32-bit rotate left by shift mod 32, done in 64 bits to avoid UB */
+    uint64_t ret = (uint64_t)val;
+    ret = (ret << (shift & 0x1f));
+    return (uint32_t)((ret & 0xffffffff) | (ret >> 32)); /* fold overflowed bits back into the low word */
+}
+
+void helper_vsm4r(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* VSM4R: 8 SM4 rounds on each of the two 128-bit blocks in ra, round keys in rb */
+    uint32_t W[12], rk[8];
+    uint32_t temp1, temp2;
+    int i, j;
+
+    for (i = 0; i < 8; i++) {
+        rk[i] = *get_element_w(env, rb, i);
+    }
+    for (i = 0; i < 2; i++) {   /* two independent 128-bit blocks */
+        for (j = 0; j < 4; j++) {
+            W[j] = *get_element_w(env, ra, i * 4 + j);
+        }
+        for (j = 0; j < 8; j++) { /* SM4 round function F with linear transform L */
+            temp1 = W[j + 1] ^ W[j + 2] ^ W[j + 3] ^ rk[j];
+            temp2 = SBOX(temp1);
+            W[j + 4] = W[j] ^ temp2 ^ rotl(temp2, 2) ^ rotl(temp2, 10) ^ rotl(temp2, 18) ^ rotl(temp2, 24);
+        }
+
+        for (j = 0; j < 4; j++) {
+            *get_element_w(env, rc, i * 4 + j) = W[8 + j];
+        }
+    }
+}
+
+void helper_vcmpueqb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-byte unsigned compare-equal: each rc lane becomes 1 or 0 */
+    uint8_t *ptr_a, *ptr_b, *ptr_c;
+    int i;
+
+    for (i = 0; i < 32; i++) {
+        ptr_a = get_element_b(env, ra, i);
+        ptr_b = get_element_b(env, rb, i);
+        ptr_c = get_element_b(env, rc, i);
+
+        *ptr_c = (*ptr_a == *ptr_b) ? 1 : 0;
+        ;
+    }
+}
+
+void helper_vcmpugtb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* per-byte unsigned compare-greater-than: each rc lane becomes 1 or 0 */
+    uint8_t *ptr_a, *ptr_b, *ptr_c;
+    int i;
+
+    for (i = 0; i < 32; i++) {
+        ptr_a = get_element_b(env, ra, i);
+        ptr_b = get_element_b(env, rb, i);
+        ptr_c = get_element_b(env, rc, i);
+
+        *ptr_c = (*ptr_a > *ptr_b) ? 1 : 0;
+        ;
+    }
+}
+
+void helper_vcmpueqbi(CPUSW64State *env, uint64_t ra, uint64_t vb,
+                      uint64_t rc) /* per-byte compare-equal against immediate vb */
+{
+    uint8_t *ptr_a, *ptr_c;
+    int i;
+
+    for (i = 0; i < 32; i++) {
+        ptr_a = get_element_b(env, ra, i);
+        ptr_c = get_element_b(env, rc, i);
+
+        *ptr_c = (*ptr_a == vb) ? 1 : 0;
+        ;
+    }
+}
+
+void helper_vcmpugtbi(CPUSW64State *env, uint64_t ra, uint64_t vb,
+                      uint64_t rc) /* per-byte unsigned greater-than against immediate vb */
+{
+    uint8_t *ptr_a, *ptr_c;
+    int i;
+
+    for (i = 0; i < 32; i++) {
+        ptr_a = get_element_b(env, ra, i);
+        ptr_c = get_element_b(env, rc, i);
+
+        *ptr_c = (*ptr_a > vb) ? 1 : 0;
+        ;
+    }
+}
+
+void helper_vsm3msw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc)
+{ /* VSM3MSW: SM3 message expansion — derive W[16..23] from W[0..15] in ra:rb */
+    uint32_t W[24];
+    uint32_t temp;
+    int i;
+
+    for (i = 0; i < 8; i++) {
+        W[i + 0] = *get_element_w(env, ra, i);
+        W[i + 8] = *get_element_w(env, rb, i);
+    }
+    for (i = 16; i < 24; i++) { /* SM3 P1-based expansion recurrence */
+        temp = W[i - 16] ^ W[i - 9] ^ rotl(W[i - 3], 15);
+        temp = temp ^ rotl(temp, 15) ^ rotl(temp, 23) ^ rotl(W[i - 13], 7) ^ W[i - 6];
+        W[i] = temp;
+    }
+    for (i = 0; i < 8; i++) {
+        *get_element_w(env, rc, i) = W[16 + i];
+    }
+}
+
+static uint32_t selck[4][8] = { /* SM4 key-schedule CK constants, 4 bytes packed per word; row = 8-round group */
+    {0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9},
+    {0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9},
+    {0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299},
+    {0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279}
+};
+
+void helper_vsm4key(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc)
+{ /* VSM4KEY: expand 8 SM4 round keys; vb selects the CK group (0..3) */
+    uint32_t K[12], *CK;
+    int i;
+    uint32_t temp1, temp2;
+
+    for (i = 4; i < 8; i++) {   /* previous 4 key words live in the high lanes of ra */
+        K[i - 4] = *get_element_w(env, ra, i);
+    }
+    CK = selck[vb & 3];         /* FIX: mask guards against out-of-bounds read of the 4-row table; identity for valid 0..3 */
+
+    for (i = 0; i < 8; i++) {   /* SM4 key-schedule round: T' = tau then L' (rot 13/23) */
+        temp1 = K[i + 1] ^ K[i + 2] ^ K[i + 3] ^ CK[i];
+        temp2 = SBOX(temp1);
+        K[i + 4] = K[i] ^ temp2 ^ rotl(temp2, 13) ^ rotl(temp2, 23);
+    }
+    for (i = 0; i < 8; i++) {
+        *get_element_w(env, rc, i) = K[i + 4];
+    }
+}
+
+void helper_vinsb(CPUSW64State *env, uint64_t va, uint64_t rb, uint64_t vc,
+                  uint64_t rd) /* copy rb to rd, then insert byte va at lane vc */
+{
+    int i;
+
+    for (i = 0; i < 128; i += 32) { /* copy all four 64-bit chunks */
+        env->fr[rd + i] = env->fr[rb + i];
+    }
+
+    *get_element_b(env, rd, vc) = (uint8_t)(va & 0xff);
+}
+
+void helper_vinsh(CPUSW64State *env, uint64_t va, uint64_t rb, uint64_t vc,
+                  uint64_t rd) /* copy rb to rd, then insert halfword va at lane vc */
+{
+    int i;
+
+    if (vc >= 16)               /* out-of-range lane: rd left untouched */
+        return;
+
+    for (i = 0; i < 128; i += 32) {
+        env->fr[rd + i] = env->fr[rb + i];
+    }
+
+    *get_element_h(env, rd, vc) = (uint16_t)(va & 0xffff);
+}
+
+void helper_vinsectlh(CPUSW64State *env, uint64_t ra, uint64_t rb,
+                      uint64_t rd) /* interleave: word lane i = ra.h[i] | rb.h[i] << 16 */
+{
+    int i;
+    uint32_t temp[8];           /* staged so rd may alias ra/rb */
+    for (i = 0; i < 8; i++) {
+        temp[i] = *get_element_h(env, ra, i) | ((uint32_t)*get_element_h(env, rb, i) << 16);
+    }
+    for (i = 0; i < 8; i++) {
+        *get_element_w(env, rd, i) = temp[i];
+    }
+}
+void helper_vinsectlw(CPUSW64State *env, uint64_t ra, uint64_t rb,
+                      uint64_t rd) /* interleave: long lane i = ra.w[i] | rb.w[i] << 32 */
+{
+    int i;
+    uint64_t temp[4];           /* staged so rd may alias ra/rb */
+    for (i = 0; i < 4; i++) {
+        temp[i] = *get_element_w(env, ra, i) | ((uint64_t)*get_element_w(env, rb, i) << 32);
+    }
+    for (i = 0; i < 4; i++) {
+        *get_element_l(env, rd, i) = temp[i];
+    }
+}
+
+void helper_vinsectlb(CPUSW64State *env, uint64_t ra, uint64_t rb,
+                      uint64_t rd) /* interleave: halfword lane i = ra.b[i] | rb.b[i] << 8 */
+{
+    int i;
+    uint16_t temp[16];          /* staged so rd may alias ra/rb */
+    for (i = 0; i < 16; i++) {
+        temp[i] = *get_element_b(env, ra, i) | ((uint16_t)*get_element_b(env, rb, i) << 8);
+    }
+    for (i = 0; i < 16; i++) {
+        *get_element_h(env, rd, i) = temp[i];
+    }
+}
+
+void helper_vshfq(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc,
+                  uint64_t rd) /* shuffle 128-bit halves: 2 bits of vc per output half (reg select + half select) */
+{
+    int i;
+    int idx;
+    uint64_t temp[4];           /* staged so rd may alias ra/rb */
+    for (i = 0; i < 2; i++) {
+        idx = ((vc >> (i * 2)) & 1) * 64; /* which 128-bit half of the source */
+        if ((vc >> (i * 2 + 1)) & 1) {    /* which source register */
+            temp[i * 2] = env->fr[rb + idx];
+            temp[i * 2 + 1] = env->fr[rb + idx + 32];
+        } else {
+            temp[i * 2] = env->fr[ra + idx];
+            temp[i * 2 + 1] = env->fr[ra + idx + 32];
+        }
+    }
+    for (i = 0; i < 4; i++) {
+        env->fr[rd + i * 32] = temp[i];
+    }
+}
+
+void helper_vshfqb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rd)
+{ /* per-byte shuffle within each 128-bit half of ra; rb bytes are selectors (bit 7 set -> zero) */
+    int i;
+    int idx;
+    int vb;
+    uint8_t temp[32];
+
+    for (i = 0; i < 16; i++) {
+        vb = *get_element_b(env, rb, i);
+        if (vb >> 7) {          /* high bit: force zero */
+            temp[i] = 0;
+        } else {
+            idx = vb & 0xf;     /* select within low 128-bit half */
+            temp[i] = *get_element_b(env, ra, idx);
+        }
+        vb = *get_element_b(env, rb, i + 16);
+        if (vb >> 7) {
+            temp[i + 16] = 0;
+        } else {
+            idx = vb & 0xf;     /* select within high 128-bit half */
+            temp[i + 16] = *get_element_b(env, ra, idx + 16);
+        }
+    }
+    for (i = 0; i < 4; i++) {
+        env->fr[rd + i * 32] = *((uint64_t*)temp + i); /* NOTE(review): type-punning cast; memcpy would be aliasing-safe */
+    }
+}
+
+void helper_vsm3r(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc,
+                  uint64_t rd) /* VSM3R: 4 SM3 compression rounds; vc (0..15) selects the round group */
+{
+    uint32_t W[8];              /* expanded message words W[j] and W'[j] = W[j]^W[j+4] */
+    uint32_t A, B, C, D, E, F, G, H, T;
+    int i;
+    uint32_t SS1, SS2, TT1, TT2, P0;
+
+    if (vc >= 16)               /* out-of-range group: no-op */
+        return;
+    for (i = 0; i < 8; i++) {
+        W[i] = *get_element_w(env, ra, i);
+    }
+    A = *get_element_w(env, rb, 0); /* state registers from rb */
+    B = *get_element_w(env, rb, 1);
+    C = *get_element_w(env, rb, 2);
+    D = *get_element_w(env, rb, 3);
+    E = *get_element_w(env, rb, 4);
+    F = *get_element_w(env, rb, 5);
+    G = *get_element_w(env, rb, 6);
+    H = *get_element_w(env, rb, 7);
+
+    if (vc < 4) {               /* rounds 0..15 use T0 and FF0/GG0 = XOR */
+        T = 0x79cc4519;
+        for (i = 0; i < 4; i++) {
+            SS1 = rotl(rotl(A, 12) + E + rotl(T, 4 * vc + i), 7);
+            SS2 = SS1 ^ rotl(A, 12);
+            TT1 = (A ^ B ^ C) + D + SS2 + (W[i] ^ W[i + 4]);
+            TT2 = (E ^ F ^ G) + H + SS1 + W[i];
+
+            P0 = TT2 ^ rotl(TT2, 9) ^ rotl(TT2, 17);
+
+            H = G;
+            G = rotl(F, 19);
+            F = E;
+            E = P0;
+            D = C;
+            C = rotl(B, 9);
+            B = A;
+            A = TT1;
+        }
+    } else {                    /* rounds 16..63 use T1 and majority/choose booleans */
+        T = 0x7a879d8a;
+        for (i = 0; i < 4; i++) {
+            SS1 = rotl(rotl(A, 12) + E + rotl(T, 4 * vc + i), 7);
+            SS2 = SS1 ^ rotl(A, 12);
+            TT1 = ((A & B) | (A & C) | (B & C)) + D + SS2 + (W[i] ^ W[i + 4]);
+            TT2 = ((E & F) | ((~E) & G)) + H + SS1 + W[i];
+
+            P0 = TT2 ^ rotl(TT2, 9) ^ rotl(TT2, 17);
+
+            H = G;
+            G = rotl(F, 19);
+            F = E;
+            E = P0;
+            D = C;
+            C = rotl(B, 9);
+            B = A;
+            A = TT1;
+        }
+    }
+    *get_element_w(env, rd, 0) = A; /* updated state out to rd */
+    *get_element_w(env, rd, 1) = B;
+    *get_element_w(env, rd, 2) = C;
+    *get_element_w(env, rd, 3) = D;
+    *get_element_w(env, rd, 4) = E;
+    *get_element_w(env, rd, 5) = F;
+    *get_element_w(env, rd, 6) = G;
+    *get_element_w(env, rd, 7) = H;
+}
diff --git a/target/sw64/translate.c b/target/sw64/translate.c
new file mode 100644
index 0000000000..37b7e89077
--- /dev/null
+++ b/target/sw64/translate.c
@@ -0,0 +1,3798 @@
+#include "translate.h"
+#include "tcg/tcg.h"
+#define DEVELOP_SW64 1
+#ifdef DEVELOP_SW64
+
+#define ILLEGAL(x) \
+ do { \
+ printf("Illegal SW64 0x%x at line %d!\n", x, __LINE__); \
+ exit(-1); \
+ } while (0)
+#endif
+
+TCGv cpu_pc;
+TCGv cpu_std_ir[31];
+TCGv cpu_fr[128];
+TCGv cpu_lock_addr;
+TCGv cpu_lock_flag;
+TCGv cpu_lock_success;
+#ifdef SW64_FIXLOCK
+TCGv cpu_lock_value;
+#endif
+
+#ifndef CONFIG_USER_ONLY
+TCGv cpu_hm_ir[31];
+#endif
+
+#include "exec/gen-icount.h"
+
+void sw64_translate_init(void)
+{
+#define DEF_VAR(V) \
+    { &cpu_##V, #V, offsetof(CPUSW64State, V) }
+
+    typedef struct {
+        TCGv* var;
+        const char* name;
+        int ofs;
+    } GlobalVar;
+
+    static const GlobalVar vars[] = {
+        DEF_VAR(pc), DEF_VAR(lock_addr),  /* NOTE(review): pc also registered below as "PC" — double registration; confirm intended */
+        DEF_VAR(lock_flag), DEF_VAR(lock_success),
+#ifdef SW64_FIXLOCK
+        DEF_VAR(lock_value),
+#endif
+    };
+    cpu_pc = tcg_global_mem_new_i64(cpu_env,
+        offsetof(CPUSW64State, pc), "PC");
+
+#undef DEF_VAR
+
+    /* Use the symbolic register names that match the disassembler. */
+    static const char ireg_names[31][4] = {
+        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1",
+        "s2", "s3", "s4", "s5", "fp", "a0", "a1", "a2", "a3", "a4", "a5",
+        "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp"};
+
+    static const char freg_names[128][4] = {  /* names repeat: four banks of 32 */
+        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9",
+        "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19",
+        "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29",
+        "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17",
+        "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27",
+        "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5",
+        "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25",
+        "f26", "f27", "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3",
+        "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
+        "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+        "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+
+#ifndef CONFIG_USER_ONLY
+    static const char shadow_names[10][8] = {
+        "hm_p1", "hm_p2", "hm_p4", "hm_p5", "hm_p6",
+        "hm_p7", "hm_p20", "hm_p21", "hm_p22", "hm_p23"};
+    static const int shadow_index[10] = {1, 2, 4, 5, 6, 7, 20, 21, 22, 23};
+#endif
+
+    int i;
+
+    for (i = 0; i < 31; i++) {
+        cpu_std_ir[i] = tcg_global_mem_new_i64(
+            cpu_env, offsetof(CPUSW64State, ir[i]), ireg_names[i]);
+    }
+
+    for (i = 0; i < 128; i++) {
+        cpu_fr[i] = tcg_global_mem_new_i64(
+            cpu_env, offsetof(CPUSW64State, fr[i]), freg_names[i]);
+    }
+    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+        const GlobalVar* v = &vars[i];
+        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
+    }
+#ifndef CONFIG_USER_ONLY
+    memcpy(cpu_hm_ir, cpu_std_ir, sizeof(cpu_hm_ir));  /* hmcode mode shares std regs; shadow regs patched in below */
+    for (i = 0; i < 10; i++) {
+        int r = shadow_index[i];
+        cpu_hm_ir[r] = tcg_global_mem_new_i64(
+            cpu_env, offsetof(CPUSW64State, sr[i]), shadow_names[i]);
+    }
+#endif
+}
+
+static bool in_superpage(DisasContext* ctx, int64_t addr)
+{
+    return false;  /* superpages not modeled; callers fall back to page checks */
+}
+
+bool use_exit_tb(DisasContext* ctx)
+{
+    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO) ||
+            ctx->base.singlestep_enabled || singlestep);
+}
+
+bool use_goto_tb(DisasContext* ctx, uint64_t dest)
+{
+    /* Suppress goto_tb in the case of single-stepping and IO. */
+    if (unlikely(use_exit_tb(ctx))) {
+        return false;
+    }
+    /* If the destination is in the superpage, the page perms can't change. */
+    if (in_superpage(ctx, dest)) {
+        return true;
+    }
+/* Check for the dest on the same page as the start of the TB. */
+#ifndef CONFIG_USER_ONLY
+    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
+#else
+    return true;
+#endif
+}
+
+void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
+{
+    uint64_t mzero = 1ull << 63;  /* bit pattern of IEEE -0.0 */
+
+    switch (cond) {
+    case TCG_COND_LE:
+    case TCG_COND_GT:
+        /* For <= or >, the -0.0 value directly compares the way we want. */
+        tcg_gen_mov_i64(dest, src);
+        break;
+
+    case TCG_COND_EQ:
+    case TCG_COND_NE:
+        /* For == or !=, we can simply mask off the sign bit and compare. */
+        tcg_gen_andi_i64(dest, src, mzero - 1);
+        break;
+
+    case TCG_COND_GE:
+    case TCG_COND_LT:
+        /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
+        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
+        tcg_gen_neg_i64(dest, dest);
+        tcg_gen_and_i64(dest, dest, src);
+        break;
+
+    default:
+        abort();
+    }
+}
+
+static TCGv load_zero(DisasContext *ctx)
+{
+    if (!ctx->zero) {
+        ctx->zero = tcg_const_i64(0);  /* lazily created, shared zero temp */
+    }
+    return ctx->zero;
+}
+
+static void free_context_temps(DisasContext *ctx)
+{
+    if (ctx->zero) {
+        tcg_temp_free(ctx->zero);
+        ctx->zero = NULL;  /* allow load_zero() to re-create it later */
+    }
+}
+
+static TCGv load_gir(DisasContext *ctx, unsigned reg)
+{
+    if (likely(reg < 31)) {
+        return ctx->ir[reg];
+    } else {
+        return load_zero(ctx);  /* register $31 always reads as zero */
+    }
+}
+
+static void gen_excp_1(int exception, int error_code)
+{
+    TCGv_i32 tmp1, tmp2;
+
+    tmp1 = tcg_const_i32(exception);
+    tmp2 = tcg_const_i32(error_code);
+    gen_helper_excp(cpu_env, tmp1, tmp2);
+    tcg_temp_free_i32(tmp2);
+    tcg_temp_free_i32(tmp1);
+}
+
+static DisasJumpType gen_excp(DisasContext* ctx, int exception,
+                              int error_code)
+{
+    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);  /* commit PC before raising */
+    gen_excp_1(exception, error_code);
+    return DISAS_NORETURN;
+}
+
+static int i_count = 1;  /* debug aid: when 0, the first invalid opcode is skipped */
+
+static inline DisasJumpType gen_invalid(DisasContext *ctx)
+{
+    if (i_count == 0) {
+        i_count++;
+        return DISAS_NEXT;
+    }
+    fprintf(stderr, "here %lx\n", ctx->base.pc_next);  /* NOTE(review): debug leftover on the error path */
+    return gen_excp(ctx, EXCP_OPCDEC, 0);
+}
+
+static uint64_t zapnot_mask(uint8_t byte_mask)
+{
+    uint64_t mask = 0;
+    int i;
+
+    for (i = 0; i < 8; ++i) {            /* bit i of byte_mask -> byte lane i */
+        if ((byte_mask >> i) & 1) {
+            mask |= 0xffull << (i * 8);  /* keep lane i (ZAPNOT semantics) */
+        }
+    }
+    return mask;
+}
+
+static void gen_ins_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb,
+ uint8_t byte_mask)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv shift = tcg_temp_new();
+
+ tcg_gen_andi_i64(tmp, va, zapnot_mask(byte_mask));
+
+ tcg_gen_andi_i64(shift, vb, 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_shl_i64(vc, tmp, shift);
+
+ tcg_temp_free(shift);
+ tcg_temp_free(tmp);
+}
+
+static void gen_ins_h(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb,
+ uint8_t byte_mask)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv shift = tcg_temp_new();
+
+ tcg_gen_andi_i64(tmp, va, zapnot_mask(byte_mask));
+
+ tcg_gen_shli_i64(shift, vb, 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+
+ tcg_gen_shr_i64(vc, tmp, shift);
+ tcg_gen_shri_i64(vc, vc, 1);
+ tcg_temp_free(shift);
+ tcg_temp_free(tmp);
+}
+
+static void gen_ext_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb,
+ uint8_t byte_mask)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv shift = tcg_temp_new();
+
+ tcg_gen_andi_i64(shift, vb, 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_shr_i64(tmp, va, shift);
+
+ tcg_gen_andi_i64(vc, tmp, zapnot_mask(byte_mask));
+
+ tcg_temp_free(shift);
+ tcg_temp_free(tmp);
+}
+
+static void gen_ext_h(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb,
+ uint8_t byte_mask)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv shift = tcg_temp_new();
+
+ tcg_gen_andi_i64(shift, vb, 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_movi_i64(tmp, 64);
+ tcg_gen_sub_i64(shift, tmp, shift);
+ tcg_gen_shl_i64(tmp, va, shift);
+
+ tcg_gen_andi_i64(vc, tmp, zapnot_mask(byte_mask));
+
+ tcg_temp_free(shift);
+ tcg_temp_free(tmp);
+}
+
+static void gen_mask_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb,
+ uint8_t byte_mask)
+{
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ tcg_gen_andi_i64(shift, vb, 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
+ tcg_gen_shl_i64(mask, mask, shift);
+
+ tcg_gen_andc_i64(vc, va, mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+}
+
+static void gen_mask_h(DisasContext *ctx, TCGv vc, TCGv va, TCGv vb,
+ uint8_t byte_mask)
+{
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ /* The instruction description is as above, where the byte_mask
+ is shifted left, and then we extract bits <15:8>. This can be
+ emulated with a right-shift on the expanded byte mask. This
+ requires extra care because for an input <2:0> == 0 we need a
+ shift of 64 bits in order to generate a zero. This is done by
+ splitting the shift into two parts, the variable shift - 1
+ followed by a constant 1 shift. The code we expand below is
+ equivalent to ~(B * 8) & 63. */
+
+ tcg_gen_shli_i64(shift, vb, 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
+ tcg_gen_shr_i64(mask, mask, shift);
+ tcg_gen_shri_i64(mask, mask, 1);
+
+ tcg_gen_andc_i64(vc, va, mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+}
+
+static inline void gen_load_mem(
+    DisasContext *ctx, void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags),
+    int ra, int rb, int32_t disp16, bool fp, bool clear)
+{
+    TCGv tmp, addr, va;
+
+    /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
+       prefetches, which we can treat as nops. No worries about
+       missed exceptions here. */
+    if (unlikely(ra == 31)) {
+        return;
+    }
+
+    tmp = tcg_temp_new();
+    addr = load_gir(ctx, rb);
+
+    if (disp16) {
+        tcg_gen_addi_i64(tmp, addr, (int64_t)disp16);
+        addr = tmp;
+    } else {
+        tcg_gen_mov_i64(tmp, addr);
+        addr = tmp;
+    }
+    if (clear) {
+        tcg_gen_andi_i64(tmp, addr, ~0x7UL);  /* unaligned form: round down to 8 bytes */
+        addr = tmp;
+    }
+
+    va = (fp ? cpu_fr[ra] : load_gir(ctx, ra));  /* fp selects the float reg file */
+    tcg_gen_qemu_load(va, addr, ctx->mem_idx);
+
+    tcg_temp_free(tmp);
+}
+
+static inline void gen_store_mem(
+    DisasContext *ctx, void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags),
+    int ra, int rb, int32_t disp16, bool fp, bool clear)
+{
+    TCGv tmp, addr, va;
+
+    tmp = tcg_temp_new();
+    addr = load_gir(ctx, rb);
+    if (disp16) {
+        tcg_gen_addi_i64(tmp, addr, disp16);
+        addr = tmp;
+    } else {
+        tcg_gen_mov_i64(tmp, addr);
+        addr = tmp;
+    }
+    if (clear) {
+        tcg_gen_andi_i64(tmp, addr, ~0x7);  /* unaligned form: round down to 8 bytes */
+        addr = tmp;
+    }
+    va = (fp ? cpu_fr[ra] : load_gir(ctx, ra));
+
+    tcg_gen_qemu_store(va, addr, ctx->mem_idx);
+    gen_helper_trace_mem(cpu_env, addr, va);  /* NOTE(review): traces every store — debug helper; confirm wanted in production */
+    tcg_temp_free(tmp);
+}
+
+static void cal_with_iregs_2(DisasContext *ctx, TCGv vc, TCGv va, TCGv vb,
+ int32_t disp13, uint16_t fn)
+{
+ TCGv tmp;
+
+ switch (fn & 0xff) {
+ case 0x00:
+ /* ADDW */
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x01:
+ /* SUBW */
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x02:
+ /* S4ADDW */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x03:
+ /* S4SUBW */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x04:
+ /* S8ADDW */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x05:
+ /* S8SUBW */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+
+ case 0x08:
+ /* ADDL */
+ tcg_gen_add_i64(vc, va, vb);
+ break;
+ case 0x09:
+ /* SUBL */
+ tcg_gen_sub_i64(vc, va, vb);
+ break;
+ case 0x0a:
+ /* S4ADDL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x0b:
+ /* S4SUBL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x0c:
+ /* S8ADDL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x0d:
+ /* S8SUBL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x10:
+ /* MULW */
+ tcg_gen_mul_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x18:
+ /* MULL */
+ tcg_gen_mul_i64(vc, va, vb);
+ break;
+ case 0x19:
+ /* MULH */
+ tmp = tcg_temp_new();
+ tcg_gen_mulu2_i64(tmp, vc, va, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x28:
+ /* CMPEQ */
+ tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
+ break;
+ case 0x29:
+ /* CMPLT */
+ tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
+ break;
+ case 0x2a:
+ /* CMPLE */
+ tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
+ break;
+ case 0x2b:
+ /* CMPULT */
+ tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
+ break;
+ case 0x2c:
+ /* CMPULE */
+ tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
+ break;
+ case 0x38:
+ /* AND */
+ tcg_gen_and_i64(vc, va, vb);
+ break;
+ case 0x39:
+ /* BIC */
+ tcg_gen_andc_i64(vc, va, vb);
+ break;
+ case 0x3a:
+ /* BIS */
+ tcg_gen_or_i64(vc, va, vb);
+ break;
+ case 0x3b:
+ /* ORNOT */
+ tcg_gen_orc_i64(vc, va, vb);
+ break;
+ case 0x3c:
+ /* XOR */
+ tcg_gen_xor_i64(vc, va, vb);
+ break;
+ case 0x3d:
+ /* EQV */
+ tcg_gen_eqv_i64(vc, va, vb);
+ break;
+ case 0x40:
+ /* INSLB */
+ gen_ins_l(ctx, vc, va, vb, 0x1);
+ break;
+ case 0x41:
+ /* INSLH */
+ gen_ins_l(ctx, vc, va, vb, 0x3);
+ break;
+ case 0x42:
+ /* INSLW */
+ gen_ins_l(ctx, vc, va, vb, 0xf);
+ break;
+ case 0x43:
+ /* INSLL */
+ gen_ins_l(ctx, vc, va, vb, 0xff);
+ break;
+ case 0x44:
+ /* INSHB */
+ gen_ins_h(ctx, vc, va, vb, 0x1);
+ break;
+ case 0x45:
+ /* INSHH */
+ gen_ins_h(ctx, vc, va, vb, 0x3);
+ break;
+ case 0x46:
+ /* INSHW */
+ gen_ins_h(ctx, vc, va, vb, 0xf);
+ break;
+ case 0x47:
+ /* INSHL */
+ gen_ins_h(ctx, vc, va, vb, 0xff);
+ break;
+ case 0x48:
+ /* SLL/SLLL */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x49:
+ /* SRL/SRLL */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x4a:
+ /* SRA/SRAL */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_sar_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x50:
+ /* EXTLB */
+ gen_ext_l(ctx, vc, va, vb, 0x1);
+ break;
+ case 0x51:
+ /* EXTLH */
+ gen_ext_l(ctx, vc, va, vb, 0x3);
+ break;
+ case 0x52:
+ /* EXTLW */
+ gen_ext_l(ctx, vc, va, vb, 0xf);
+ break;
+ case 0x53:
+ /* EXTLL */
+ gen_ext_l(ctx, vc, va, vb, 0xff);
+ break;
+ case 0x54:
+ /* EXTHB */
+ gen_ext_h(ctx, vc, va, vb, 0x1);
+ break;
+ case 0x55:
+ /* EXTHH */
+ gen_ext_h(ctx, vc, va, vb, 0x3);
+ break;
+ case 0x56:
+ /* EXTHW */
+ gen_ext_h(ctx, vc, va, vb, 0xf);
+ break;
+ case 0x57:
+ /* EXTHL */
+ gen_ext_h(ctx, vc, va, vb, 0xff);
+ break;
+ case 0x58:
+ /* CTPOP */
+ tcg_gen_ctpop_i64(vc, vb);
+ break;
+ case 0x59:
+ /* CTLZ */
+ tcg_gen_clzi_i64(vc, vb, 64);
+ break;
+ case 0x5a:
+ /* CTTZ */
+ tcg_gen_ctzi_i64(vc, vb, 64);
+ break;
+ case 0x60:
+ /* MASKLB */
+ gen_mask_l(ctx, vc, va, vb, 0x1);
+ break;
+ case 0x61:
+ /* MASKLH */
+ gen_mask_l(ctx, vc, va, vb, 0x3);
+ break;
+ case 0x62:
+ /* MASKLW */
+ gen_mask_l(ctx, vc, va, vb, 0xf);
+ break;
+ case 0x63:
+ /* MASKLL */
+ gen_mask_l(ctx, vc, va, vb, 0xff);
+ break;
+ case 0x64:
+ /* MASKHB */
+ gen_mask_h(ctx, vc, va, vb, 0x1);
+ break;
+ case 0x65:
+ /* MASKHH */
+ gen_mask_h(ctx, vc, va, vb, 0x3);
+ break;
+ case 0x66:
+ /* MASKHW */
+ gen_mask_h(ctx, vc, va, vb, 0xf);
+ break;
+ case 0x67:
+ /* MASKHL */
+ gen_mask_h(ctx, vc, va, vb, 0xff);
+ break;
+ case 0x68:
+ /* ZAP */
+ gen_helper_zap(vc, va, vb);
+ break;
+ case 0x69:
+ /* ZAPNOT */
+ gen_helper_zapnot(vc, va, vb);
+ break;
+ case 0x6a:
+ /* SEXTB */
+ tcg_gen_ext8s_i64(vc, vb);
+ break;
+ case 0x6b:
+ /* SEXTH */
+ tcg_gen_ext16s_i64(vc, vb);
+ break;
+ case 0x6c:
+ /* CMPGEB*/
+ gen_helper_cmpgeb(vc, va, vb);
+ break;
+ default:
+ ILLEGAL(fn);
+ }
+}
+
+static void cal_with_imm_2(DisasContext *ctx, TCGv vc, TCGv va, int64_t disp,
+ uint8_t fn)
+{
+ TCGv_i64 t0 = tcg_const_i64(disp);
+ cal_with_iregs_2(ctx, vc, va, t0, 0, fn);
+ tcg_temp_free_i64(t0);
+}
+
+static void cal_with_iregs_3(DisasContext *ctx, TCGv vd, TCGv va, TCGv vb,
+ TCGv vc, uint8_t fn)
+{
+ TCGv_i64 t0 = tcg_const_i64(0);
+ TCGv_i64 tmp;
+ switch (fn) {
+ case 0x0:
+ /* SELEQ */
+ tcg_gen_movcond_i64(TCG_COND_EQ, vd, va, t0, vb, vc);
+ break;
+ case 0x1:
+ /* SELGE */
+ tcg_gen_movcond_i64(TCG_COND_GE, vd, va, t0, vb, vc);
+ break;
+ case 0x2:
+ /* SELGT */
+ tcg_gen_movcond_i64(TCG_COND_GT, vd, va, t0, vb, vc);
+ break;
+ case 0x3:
+ /* SELLE */
+ tcg_gen_movcond_i64(TCG_COND_LE, vd, va, t0, vb, vc);
+ break;
+ case 0x4:
+ /* SELLT */
+ tcg_gen_movcond_i64(TCG_COND_LT, vd, va, t0, vb, vc);
+ break;
+ case 0x5:
+ /* SELNE */
+ tcg_gen_movcond_i64(TCG_COND_NE, vd, va, t0, vb, vc);
+ break;
+ case 0x6:
+ /* SELLBC */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp, t0, vb, vc);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 0x7:
+ /* SELLBS */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp, t0, vb, vc);
+ tcg_temp_free_i64(tmp);
+ break;
+ default:
+ ILLEGAL(fn);
+ break;
+ }
+ tcg_temp_free_i64(t0);
+}
+
+static void cal_with_imm_3(DisasContext *ctx, TCGv vd, TCGv va, int32_t disp,
+ TCGv vc, uint8_t fn)
+{
+ TCGv_i64 vb = tcg_const_i64(disp);
+ cal_with_iregs_3(ctx, vd, va, vb, vc, fn);
+ tcg_temp_free_i64(vb);
+}
+
+static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
+{
+    uint64_t dest = ctx->base.pc_next + ((int64_t)disp << 2);  /* word-scaled branch target */
+    if (ra != 31) {
+        tcg_gen_movi_i64(load_gir(ctx, ra), ctx->base.pc_next & (~0x3UL));  /* link register */
+    }
+    if (disp == 0) {
+        return DISAS_NEXT;  /* branch-to-next falls through; was 'return 0' relying on DISAS_NEXT == 0 */
+    } else if (use_goto_tb(ctx, dest)) {
+        tcg_gen_goto_tb(0);
+        tcg_gen_movi_i64(cpu_pc, dest);
+        tcg_gen_exit_tb(ctx->base.tb, 0);
+        return DISAS_NORETURN;
+    } else {
+        tcg_gen_movi_i64(cpu_pc, dest);
+        return DISAS_PC_UPDATED;
+    }
+}
+
+static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
+                                        TCGv cmp, int disp)
+{
+    uint64_t dest = ctx->base.pc_next + (disp << 2);  /* NOTE(review): 32-bit shift here; gen_bdirect uses (int64_t)disp << 2 — confirm intended */
+    TCGLabel* lab_true = gen_new_label();
+
+    if (use_goto_tb(ctx, dest)) {
+        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
+
+        tcg_gen_goto_tb(0);
+        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);  /* not-taken: fall through */
+        tcg_gen_exit_tb(ctx->base.tb, 0);
+
+        gen_set_label(lab_true);
+        tcg_gen_goto_tb(1);
+        tcg_gen_movi_i64(cpu_pc, dest);               /* taken: chain to dest */
+        tcg_gen_exit_tb(ctx->base.tb, 1);
+
+        return DISAS_NORETURN;
+    } else {
+        TCGv_i64 t = tcg_const_i64(0);
+        TCGv_i64 d = tcg_const_i64(dest);
+        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);
+
+        tcg_gen_movcond_i64(cond, cpu_pc, cmp, t, d, p);  /* pc = cond(cmp,0) ? dest : next */
+
+        tcg_temp_free_i64(t);
+        tcg_temp_free_i64(d);
+        tcg_temp_free_i64(p);
+        return DISAS_PC_UPDATED;
+    }
+}
+
+static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, uint32_t ra,
+                               int32_t disp, uint64_t mask)
+{
+    TCGv tmp = tcg_temp_new();
+    DisasJumpType ret;
+
+    tcg_gen_andi_i64(tmp, load_gir(ctx, ra), mask);  /* test only the masked bits */
+    ret = gen_bcond_internal(ctx, cond, tmp, disp);
+    tcg_temp_free(tmp);
+    return ret;
+}
+
+static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
+                                int32_t disp)
+{
+    TCGv cmp_tmp = tcg_temp_new();
+    DisasJumpType ret;
+
+    gen_fold_mzero(cond, cmp_tmp, cpu_fr[ra]);  /* fold -0.0 so it compares as +0.0 */
+    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
+    tcg_temp_free(cmp_tmp);
+    return ret;
+}
+
+#ifndef CONFIG_USER_ONLY
+static void gen_qemu_pri_ldw(TCGv t0, TCGv t1, int memidx)
+{
+ gen_helper_pri_ldw(t0, cpu_env, t1);
+}
+
+static void gen_qemu_pri_stw(TCGv t0, TCGv t1, int memidx)
+{
+ gen_helper_pri_stw(cpu_env, t0, t1);
+}
+
+static void gen_qemu_pri_ldl(TCGv t0, TCGv t1, int memidx)
+{
+ gen_helper_pri_ldl(t0, cpu_env, t1);
+}
+
+static void gen_qemu_pri_stl(TCGv t0, TCGv t1, int memidx)
+{
+ gen_helper_pri_stl(cpu_env, t0, t1);
+}
+#endif
+
+static inline void gen_load_mem_simd(
+ DisasContext *ctx, void (*tcg_gen_qemu_load)(int t0, TCGv t1, int flags),
+ int ra, int rb, int32_t disp16, uint64_t mask)
+{
+ TCGv tmp, addr;
+
+ /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
+ prefetches, which we can treat as nops. No worries about
+ missed exceptions here. */
+ if (unlikely(ra == 31))
+ return;
+
+ tmp = tcg_temp_new();
+ addr = load_gir(ctx, rb);
+
+ if (disp16) {
+ tcg_gen_addi_i64(tmp, addr, (int64_t)disp16);
+ addr = tmp;
+ } else {
+ tcg_gen_mov_i64(tmp, addr);
+ addr = tmp;
+ }
+
+ if (mask) {
+ tcg_gen_andi_i64(addr, addr, mask);
+ }
+
+ tcg_gen_qemu_load(ra, addr, ctx->mem_idx);
+ // FIXME: for debug
+
+ tcg_temp_free(tmp);
+}
+
+static inline void gen_store_mem_simd(
+ DisasContext *ctx, void (*tcg_gen_qemu_store)(int t0, TCGv t1, int flags),
+ int ra, int rb, int32_t disp16, uint64_t mask)
+{
+ TCGv tmp, addr;
+
+ tmp = tcg_temp_new();
+ addr = load_gir(ctx, rb);
+ if (disp16) {
+ tcg_gen_addi_i64(tmp, addr, (int64_t)disp16);
+ addr = tmp;
+ } else {
+ tcg_gen_mov_i64(tmp, addr);
+ addr = tmp;
+ }
+ if (mask) {
+ tcg_gen_andi_i64(addr, addr, mask);
+ }
+ // FIXME: for debug
+ tcg_gen_qemu_store(ra, addr, ctx->mem_idx);
+
+ tcg_temp_free(tmp);
+}
+
+static void gen_qemu_ldwe(int t0, TCGv t1, int memidx)
+{
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_qemu_ld_i64(tmp, t1, memidx, MO_ALIGN_4 | MO_LEUL);
+ tcg_gen_shli_i64(cpu_fr[t0], tmp, 32);
+ tcg_gen_or_i64(cpu_fr[t0], cpu_fr[t0], tmp);
+ tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]);
+ tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]);
+ tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]);
+
+ tcg_temp_free(tmp);
+}
+
+static void gen_qemu_vlds(int t0, TCGv t1, int memidx)
+{
+ int i;
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_ALIGN_4 | MO_LEUL);
+ gen_helper_memory_to_s(cpu_fr[t0], tmp32);
+ tcg_gen_addi_i64(t1, t1, 4);
+
+ for (i = 1; i < 4; i++) {
+ tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_LEUL);
+ gen_helper_memory_to_s(cpu_fr[t0 + i * 32], tmp32);
+ tcg_gen_addi_i64(t1, t1, 4);
+ }
+
+ tcg_temp_free_i32(tmp32);
+}
+
+static void gen_qemu_ldse(int t0, TCGv t1, int memidx)
+{
+    TCGv_i32 tmp32 = tcg_temp_new_i32();
+    /* Load one 32-bit float and broadcast it to all four vector lanes. */
+
+    tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_ALIGN_4 | MO_LEUL);
+    gen_helper_memory_to_s(cpu_fr[t0], tmp32);
+    tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]);
+    tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]);
+    tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]);
+
+    /* (dropped tmp64: it was allocated and freed without ever being used) */
+    tcg_temp_free_i32(tmp32);
+}
+
+static void gen_qemu_ldde(int t0, TCGv t1, int memidx)
+{
+ tcg_gen_qemu_ld_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEQ);
+ tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]);
+ tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]);
+ tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]);
+}
+
+static void gen_qemu_vldd(int t0, TCGv t1, int memidx)
+{
+ tcg_gen_qemu_ld_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEQ);
+ tcg_gen_addi_i64(t1, t1, 8);
+ tcg_gen_qemu_ld_i64(cpu_fr[t0 + 32], t1, memidx, MO_TEQ);
+ tcg_gen_addi_i64(t1, t1, 8);
+ tcg_gen_qemu_ld_i64(cpu_fr[t0 + 64], t1, memidx, MO_TEQ);
+ tcg_gen_addi_i64(t1, t1, 8);
+ tcg_gen_qemu_ld_i64(cpu_fr[t0 + 96], t1, memidx, MO_TEQ);
+}
+
+static void gen_qemu_vsts(int t0, TCGv t1, int memidx)
+{
+ int i;
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_s_to_memory(tmp, cpu_fr[t0]);
+ tcg_gen_qemu_st_i32(tmp, t1, memidx, MO_ALIGN_4 | MO_LEUL);
+ tcg_gen_addi_i64(t1, t1, 4);
+ for (i = 1; i < 4; i++) {
+ gen_helper_s_to_memory(tmp, cpu_fr[t0 + 32 * i]);
+ tcg_gen_qemu_st_i32(tmp, t1, memidx, MO_LEUL);
+ tcg_gen_addi_i64(t1, t1, 4);
+ }
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_qemu_vstd(int t0, TCGv t1, int memidx)
+{
+ tcg_gen_qemu_st_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEQ);
+ tcg_gen_addi_i64(t1, t1, 8);
+ tcg_gen_qemu_st_i64(cpu_fr[t0 + 32], t1, memidx, MO_TEQ);
+ tcg_gen_addi_i64(t1, t1, 8);
+ tcg_gen_qemu_st_i64(cpu_fr[t0 + 64], t1, memidx, MO_TEQ);
+ tcg_gen_addi_i64(t1, t1, 8);
+ tcg_gen_qemu_st_i64(cpu_fr[t0 + 96], t1, memidx, MO_TEQ);
+}
+
+static inline void gen_qemu_fsts(TCGv t0, TCGv t1, int flags)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_s_to_memory(tmp, t0);
+ tcg_gen_qemu_st_i32(tmp, t1, flags, MO_LEUL);
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_qemu_flds(TCGv t0, TCGv t1, int flags)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tmp, t1, flags, MO_LEUL);
+ gen_helper_memory_to_s(t0, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static TCGv gen_ieee_input(DisasContext *ctx, int reg, int is_cmp)  /* is_cmp currently unused */
+{
+    TCGv val;
+
+    if (unlikely(reg == 31)) {
+        val = load_zero(ctx);  /* f31 reads as zero */
+    } else {
+        val = cpu_fr[reg];
+#ifndef CONFIG_USER_ONLY
+        /* In system mode, raise exceptions for denormals like real
+           hardware. In user mode, proceed as if the OS completion
+           handler is handling the denormal as per spec. */
+        gen_helper_ieee_input(cpu_env, val);
+#endif
+    }
+    return val;
+}
+
+static void gen_fp_exc_raise(int rc)
+{
+#ifndef CONFIG_USER_ONLY
+    TCGv_i32 reg = tcg_const_i32(rc + 32);  /* NOTE(review): +32 bias — presumably selects fp reg namespace in the helper; confirm */
+    gen_helper_fp_exc_raise(cpu_env, reg);
+    tcg_temp_free_i32(reg);
+#endif
+}
+
+static void gen_ieee_arith2(DisasContext *ctx,
+                            void (*helper)(TCGv, TCGv_ptr, TCGv), int ra,
+                            int rc)
+{
+    TCGv va, vc;
+
+    va = gen_ieee_input(ctx, ra, 0);
+    vc = cpu_fr[rc];
+    helper(vc, cpu_env, va);
+
+    gen_fp_exc_raise(rc);  /* deliver any accumulated fp exceptions for rc */
+}
+
+static void gen_ieee_arith3(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra,
+ int rb, int rc)
+{
+ TCGv va, vb, vc;
+
+ va = gen_ieee_input(ctx, ra, 0);
+ vb = gen_ieee_input(ctx, rb, 0);
+ vc = cpu_fr[rc];
+ helper(vc, cpu_env, va, vb);
+
+ gen_fp_exc_raise(rc);
+}
+
+#define IEEE_ARITH2(name) \
+ static inline void glue(gen_, name)(DisasContext * ctx, int ra, int rc) { \
+ gen_ieee_arith2(ctx, gen_helper_##name, ra, rc); \
+ }
+
+#define IEEE_ARITH3(name) \
+ static inline void glue(gen_, name)(DisasContext * ctx, int ra, int rb, \
+ int rc) { \
+ gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc); \
+ }
+IEEE_ARITH3(fadds)
+IEEE_ARITH3(faddd)
+IEEE_ARITH3(fsubs)
+IEEE_ARITH3(fsubd)
+IEEE_ARITH3(fmuls)
+IEEE_ARITH3(fmuld)
+IEEE_ARITH3(fdivs)
+IEEE_ARITH3(fdivd)
+IEEE_ARITH2(frecs)
+IEEE_ARITH2(frecd)
+
+static void gen_ieee_compare(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra,
+ int rb, int rc)
+{
+ TCGv va, vb, vc;
+
+ va = gen_ieee_input(ctx, ra, 1);
+ vb = gen_ieee_input(ctx, rb, 1);
+ vc = cpu_fr[rc];
+ helper(vc, cpu_env, va, vb);
+
+ gen_fp_exc_raise(rc);
+}
+
+#define IEEE_CMP2(name) \
+ static inline void glue(gen_, name)(DisasContext *ctx, int ra, int rb, \
+ int rc) { \
+ gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc); \
+ }
+
+IEEE_CMP2(fcmpun)
+IEEE_CMP2(fcmpeq)
+IEEE_CMP2(fcmplt)
+IEEE_CMP2(fcmple)
+
+static void gen_fcvtdl(int rb, int rc, uint64_t round_mode)
+{
+ TCGv tmp64;
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_movi_i64(tmp64, round_mode);
+ gen_helper_fcvtdl(cpu_fr[rc], cpu_env, cpu_fr[rb], tmp64);
+ tcg_temp_free(tmp64);
+ gen_fp_exc_raise(rc);
+}
+
+static void cal_with_fregs_2(DisasContext *ctx, uint8_t rc, uint8_t ra,
+ uint8_t rb, uint8_t fn)
+{
+ TCGv tmp64;
+ TCGv_i32 tmp32;
+ switch (fn) {
+ case 0x00:
+ /* FADDS */
+ gen_fadds(ctx, ra, rb, rc);
+ break;
+ case 0x01:
+ /* FADDD */
+ gen_faddd(ctx, ra, rb, rc);
+ break;
+ case 0x02:
+ /* FSUBS */
+ gen_fsubs(ctx, ra, rb, rc);
+ break;
+ case 0x03:
+ /* FSUBD */
+ gen_fsubd(ctx, ra, rb, rc);
+ break;
+ case 0x4:
+ /* FMULS */
+ gen_fmuls(ctx, ra, rb, rc);
+ break;
+ case 0x05:
+ /* FMULD */
+ gen_fmuld(ctx, ra, rb, rc);
+ break;
+ case 0x06:
+ /* FDIVS */
+ gen_fdivs(ctx, ra, rb, rc);
+ break;
+ case 0x07:
+ /* FDIVD */
+ gen_fdivd(ctx, ra, rb, rc);
+ break;
+ case 0x08:
+ /* FSQRTS */
+ gen_helper_fsqrts(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x09:
+ /* FSQRTD */
+ gen_helper_fsqrt(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x10:
+ /* FCMPEQ */
+ gen_fcmpeq(ctx, ra, rb, rc);
+ break;
+ case 0x11:
+ /* FCMPLE */
+ gen_fcmple(ctx, ra, rb, rc);
+ break;
+ case 0x12:
+ /* FCMPLT */
+ gen_fcmplt(ctx, ra, rb, rc);
+ break;
+ case 0x13:
+ /* FCMPUN */
+ gen_fcmpun(ctx, ra, rb, rc);
+ break;
+ case 0x20:
+ /* FCVTSD */
+ gen_helper_fcvtsd(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x21:
+ /* FCVTDS */
+ gen_helper_fcvtds(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x22:
+ /* FCVTDL_G */
+ gen_fcvtdl(rb, rc, 0);
+ break;
+ case 0x23:
+ /* FCVTDL_P */
+ gen_fcvtdl(rb, rc, 2);
+ break;
+ case 0x24:
+ /* FCVTDL_Z */
+ gen_fcvtdl(rb, rc, 3);
+ break;
+ case 0x25:
+ /* FCVTDL_N */
+ gen_fcvtdl(rb, rc, 1);
+ break;
+ case 0x27:
+ /* FCVTDL */
+ gen_helper_fcvtdl_dyn(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x28:
+ /* FCVTWL */
+ gen_helper_fcvtwl(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ tcg_gen_ext32s_i64(cpu_fr[rc], cpu_fr[rc]);
+ break;
+ case 0x29:
+ /* FCVTLW */
+ gen_helper_fcvtlw(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x2d:
+ /* FCVTLS */
+ gen_helper_fcvtls(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x2f:
+ /* FCVTLD */
+ gen_helper_fcvtld(cpu_fr[rc], cpu_env, cpu_fr[rb]);
+ break;
+ case 0x30:
+ /* FCPYS */
+ tmp64 = tcg_temp_new();
+ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 63);
+ tcg_gen_shli_i64(tmp64, tmp64, 63);
+ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x7fffffffffffffffUL);
+ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x31:
+ /* FCPYSE */
+ tmp64 = tcg_temp_new();
+ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 52);
+ tcg_gen_shli_i64(tmp64, tmp64, 52);
+ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x000fffffffffffffUL);
+ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x32:
+ /* FCPYSN */
+ tmp64 = tcg_temp_new();
+ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 63);
+ tcg_gen_not_i64(tmp64, tmp64);
+ tcg_gen_shli_i64(tmp64, tmp64, 63);
+ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x7fffffffffffffffUL);
+ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x40:
+ /* IFMOVS */
+ tmp64 = tcg_temp_new();
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_movi_i64(tmp64, ra);
+ tcg_gen_extrl_i64_i32(tmp32, load_gir(ctx, ra));
+ gen_helper_memory_to_s(tmp64, tmp32);
+ tcg_gen_mov_i64(cpu_fr[rc], tmp64);
+ tcg_gen_movi_i64(tmp64, rc);
+ tcg_temp_free(tmp64);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x41:
+ /* IFMOVD */
+ tcg_gen_mov_i64(cpu_fr[rc], load_gir(ctx, ra));
+ break;
+ case 0x50:
+ /* RFPCR */
+ gen_helper_load_fpcr(cpu_fr[ra], cpu_env);
+ break;
+ case 0x51:
+ /* WFPCR */
+ gen_helper_store_fpcr(cpu_env, cpu_fr[ra]);
+ break;
+ case 0x54:
+ /* SETFPEC0 */
+ tmp64 = tcg_const_i64(0);
+ gen_helper_setfpcrx(cpu_env, tmp64);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x55:
+ /* SETFPEC1 */
+ tmp64 = tcg_const_i64(1);
+ gen_helper_setfpcrx(cpu_env, tmp64);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x56:
+ /* SETFPEC2 */
+ tmp64 = tcg_const_i64(2);
+ gen_helper_setfpcrx(cpu_env, tmp64);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x57:
+ /* SETFPEC3 */
+ tmp64 = tcg_const_i64(3);
+ gen_helper_setfpcrx(cpu_env, tmp64);
+ tcg_temp_free(tmp64);
+ break;
+ default:
+ fprintf(stderr, "Illegal insn func[%x]\n", fn);
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+/*
+ * Translate a four-operand floating-point instruction (opcode 0x19):
+ * fused multiply-add/subtract variants (FMAx, FMSx, FNMAx, FNMSx) and the
+ * float-select variants (FSELxx), dispatched on the 6-bit function code.
+ * rd/ra/rb/rc index the FP register file cpu_fr[]; an unknown function
+ * code raises an illegal-instruction trap via gen_invalid().
+ */
+static void cal_with_fregs_4(DisasContext *ctx, uint8_t rd, uint8_t ra,
+                             uint8_t rb, uint8_t rc, uint8_t fn)
+{
+    TCGv zero = tcg_const_i64(0);
+    TCGv va, vb, vc, vd, tmp64;
+
+    va = cpu_fr[ra];
+    vb = cpu_fr[rb];
+    vc = cpu_fr[rc];
+    vd = cpu_fr[rd];
+
+    switch (fn) {
+    case 0x00:
+        /* FMAS */
+        gen_helper_fmas(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x01:
+        /* FMAD */
+        gen_helper_fmad(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x02:
+        /* FMSS */
+        gen_helper_fmss(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x03:
+        /* FMSD */
+        gen_helper_fmsd(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x04:
+        /* FNMAS */
+        gen_helper_fnmas(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x05:
+        /* FNMAD */
+        gen_helper_fnmad(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x06:
+        /* FNMSS */
+        gen_helper_fnmss(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x07:
+        /* FNMSD */
+        gen_helper_fnmsd(vd, cpu_env, va, vb, vc);
+        break;
+    case 0x10:
+        /* FSELEQ */
+        /* NOTE(review): original author flagged this translation as possibly
+         * wrong ("Maybe wrong translation") — the select picks vb when the
+         * fcmpeq helper result is nonzero; confirm against the ISA manual. */
+        tmp64 = tcg_temp_new();
+        gen_helper_fcmpeq(tmp64, cpu_env, va, zero);
+        tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb);
+        tcg_temp_free(tmp64);
+        break;
+    case 0x11:
+        /* FSELNE */
+        tmp64 = tcg_temp_new();
+        gen_helper_fcmpeq(tmp64, cpu_env, va, zero);
+        tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vb, vc);
+        tcg_temp_free(tmp64);
+        break;
+    case 0x12:
+        /* FSELLT */
+        tmp64 = tcg_temp_new();
+        gen_helper_fcmplt(tmp64, cpu_env, va, zero);
+        tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb);
+        tcg_temp_free(tmp64);
+        break;
+    case 0x13:
+        /* FSELLE */
+        tmp64 = tcg_temp_new();
+        gen_helper_fcmple(tmp64, cpu_env, va, zero);
+        tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb);
+        tcg_temp_free(tmp64);
+        break;
+    case 0x14:
+        /* FSELGT */
+        tmp64 = tcg_temp_new();
+        gen_helper_fcmpgt(tmp64, cpu_env, va, zero);
+        tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp64, zero, vb, vc);
+        tcg_temp_free(tmp64);
+        break;
+    case 0x15:
+        /* FSELGE */
+        tmp64 = tcg_temp_new();
+        gen_helper_fcmpge(tmp64, cpu_env, va, zero);
+        tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp64, zero, vb, vc);
+        tcg_temp_free(tmp64);
+        break;
+    default:
+        fprintf(stderr, "Illegal insn func[%x]\n", fn);
+        gen_invalid(ctx);
+        break;
+    }
+    tcg_temp_free(zero);
+}
+/*
+ * LLDW: load-locked 32-bit word (sign-extended to 64 bits in t0).
+ * Records the lock address — and, with SW64_FIXLOCK, the zero-extended
+ * loaded value — for the matching store-conditional.
+ */
+static inline void gen_qemu_lldw(TCGv t0, TCGv t1, int flags)
+{
+    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
+    tcg_gen_mov_i64(cpu_lock_addr, t1);
+#ifdef SW64_FIXLOCK
+    tcg_gen_ext32u_i64(cpu_lock_value, t0);
+#endif
+}
+
+/*
+ * LLDL: load-locked 64-bit longword.  Same lock bookkeeping as
+ * gen_qemu_lldw, but the full value is remembered (no extension needed).
+ */
+static inline void gen_qemu_lldl(TCGv t0, TCGv t1, int flags)
+{
+    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
+    tcg_gen_mov_i64(cpu_lock_addr, t1);
+#ifdef SW64_FIXLOCK
+    tcg_gen_mov_i64(cpu_lock_value, t0);
+#endif
+}
+
+/*
+ * LSTW/LSTL: store-conditional.  The store succeeds only when the computed
+ * address still equals cpu_lock_addr and cpu_lock_flag is set; with
+ * SW64_FIXLOCK the store is performed as an atomic cmpxchg against the
+ * remembered cpu_lock_value and cpu_lock_success records the outcome.
+ * The lock state is cleared on both the success and failure paths.
+ *
+ * Fix: the original emitted tcg_temp_free_i64(addr) immediately after the
+ * first branch, but the non-SW64_FIXLOCK path still referenced 'addr' in
+ * tcg_gen_qemu_st_i64() — a translation-time use-after-free of the TCG
+ * temporary.  The free is now emitted after the last use of 'addr' in
+ * both configurations.
+ */
+static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
+                                           int32_t disp16, int mem_idx,
+                                           MemOp op)
+{
+    TCGLabel *lab_fail, *lab_done;
+    TCGv addr;
+
+    addr = tcg_temp_new_i64();
+    tcg_gen_addi_i64(addr, load_gir(ctx, rb), disp16);
+    free_context_temps(ctx);
+
+    lab_fail = gen_new_label();
+    lab_done = gen_new_label();
+    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
+    tcg_gen_brcondi_i64(TCG_COND_NE, cpu_lock_flag, 0x1, lab_fail);
+#ifdef SW64_FIXLOCK
+    TCGv val = tcg_temp_new_i64();
+    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
+                               load_gir(ctx, ra), mem_idx, op);
+    tcg_gen_setcond_i64(TCG_COND_EQ, cpu_lock_success, val, cpu_lock_value);
+    tcg_temp_free_i64(val);
+#else
+    tcg_gen_qemu_st_i64(load_gir(ctx, ra), addr, mem_idx, op);
+#endif
+    tcg_temp_free_i64(addr);
+
+    tcg_gen_br(lab_done);
+
+    gen_set_label(lab_fail);
+    tcg_gen_movi_i64(cpu_lock_success, 0);
+    gen_set_label(lab_done);
+
+    tcg_gen_movi_i64(cpu_lock_flag, 0);
+    tcg_gen_movi_i64(cpu_lock_addr, -1);
+    return DISAS_NEXT;
+}
+
+/*
+ * SYS_CALL: dispatch a system-call instruction.  Codes 0x80..0xbf are
+ * handled inline where possible (IMB is a no-op under emulation;
+ * RDUNIQUE/WRUNIQUE access env->unique in user mode); everything else
+ * raises EXCP_CALL_SYS.  In system mode the next PC is first saved into
+ * cpu_hm_ir[23] so hmcode can return.
+ */
+static DisasJumpType gen_sys_call(DisasContext *ctx, int syscode)
+{
+    if (syscode >= 0x80 && syscode <= 0xbf) {
+        switch (syscode) {
+        case 0x86:
+            /* IMB */
+            /* No-op inside QEMU */
+            break;
+#ifdef CONFIG_USER_ONLY
+        case 0x9E:
+            /* RDUNIQUE */
+            tcg_gen_ld_i64(ctx->ir[IDX_V0], cpu_env,
+                           offsetof(CPUSW64State, unique));
+            break;
+        case 0x9F:
+            /* WRUNIQUE */
+            tcg_gen_st_i64(ctx->ir[IDX_A0], cpu_env,
+                           offsetof(CPUSW64State, unique));
+            break;
+#endif
+        default:
+            goto do_sys_call;
+        }
+        return DISAS_NEXT;
+    }
+do_sys_call:
+#ifdef CONFIG_USER_ONLY
+    return gen_excp(ctx, EXCP_CALL_SYS, syscode);
+#else
+    tcg_gen_movi_i64(cpu_hm_ir[23], ctx->base.pc_next);
+    return gen_excp(ctx, EXCP_CALL_SYS, syscode);
+#endif
+}
+
+/* Emit a read of control/status register 'idx' into va via the CSR helper. */
+static void read_csr(int idx, TCGv va)
+{
+    TCGv_i64 tmp = tcg_const_i64(idx);
+    gen_helper_read_csr(va, cpu_env, tmp);
+    tcg_temp_free_i64(tmp);
+}
+
+/*
+ * Emit a write of va into control/status register 'idx' via the CSR helper.
+ * NOTE(review): the 'env' parameter is unused in this function.
+ */
+static void write_csr(int idx, TCGv va, CPUSW64State *env)
+{
+    TCGv_i64 tmp = tcg_const_i64(idx);
+    gen_helper_write_csr(cpu_env, tmp, va);
+    tcg_temp_free_i64(tmp);
+}
+
+/*
+ * LDW_SET/LDL_SET: atomically exchange the memory word at rb+disp12 with
+ * the constant 1, returning the previous value in ra.  bype selects the
+ * operand width: 0 = 32-bit (MO_TESL), nonzero = 64-bit (MO_TEQ).
+ * NOTE(review): 'bype' looks like a typo for 'type'; the in-body comment
+ * below mentions Alpha's LDQ_U and appears to be copied from target/alpha.
+ */
+static inline void ldx_set(DisasContext *ctx, int ra, int rb, int32_t disp12,
+                           bool bype)
+{
+    TCGv tmp, addr, va, t1;
+
+    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
+       prefetches, which we can treat as nops.  No worries about
+       missed exceptions here.  */
+    if (unlikely(ra == 31)) {
+        return;
+    }
+
+    tmp = tcg_temp_new();
+    t1 = tcg_const_i64(1);
+    addr = load_gir(ctx, rb);
+
+    tcg_gen_addi_i64(tmp, addr, disp12);
+    addr = tmp;
+
+    va = load_gir(ctx, ra);
+    if (bype == 0) {
+        tcg_gen_atomic_xchg_i64(va, addr, t1, ctx->mem_idx, MO_TESL);
+    } else {
+        tcg_gen_atomic_xchg_i64(va, addr, t1, ctx->mem_idx, MO_TEQ);
+    }
+
+    tcg_temp_free(tmp);
+    tcg_temp_free(t1);
+}
+
+/*
+ * LDW_INC/LDL_INC/LDW_DEC/LDL_DEC: atomic fetch-and-add of 'val' (+1 or -1
+ * at the call sites) on the memory word at rb+disp12; the old value lands
+ * in ra.  bype selects width: 0 = 32-bit (MO_TESL), nonzero = 64-bit.
+ */
+static inline void ldx_xxx(DisasContext *ctx, int ra, int rb, int32_t disp12,
+                           bool bype, int64_t val)
+{
+    TCGv tmp, addr, va, t;
+
+    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
+       prefetches, which we can treat as nops.  No worries about
+       missed exceptions here.  */
+    if (unlikely(ra == 31)) {
+        return;
+    }
+
+    tmp = tcg_temp_new();
+    t = tcg_const_i64(val);
+    addr = load_gir(ctx, rb);
+
+    tcg_gen_addi_i64(tmp, addr, disp12);
+    addr = tmp;
+
+    va = load_gir(ctx, ra);
+    if (bype == 0) {
+        tcg_gen_atomic_fetch_add_i64(va, addr, t, ctx->mem_idx, MO_TESL);
+    } else {
+        tcg_gen_atomic_fetch_add_i64(va, addr, t, ctx->mem_idx, MO_TEQ);
+    }
+
+    tcg_temp_free(tmp);
+    tcg_temp_free(t);
+}
+
+/*
+ * SRLOW (register shift): right-shift the 256-bit vector.  Note that va/vc
+ * are constant TCGvs carrying the *register indices* ra/rc (not values) —
+ * the helper indexes the FP register file itself.  The shift amount is
+ * bits [36:29] of fr[rb] (shri 29 then mask 0xff).
+ */
+static void tcg_gen_srlow_i64(int ra, int rc, int rb)
+{
+    TCGv va, vb, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    vb = cpu_fr[rb];
+    tcg_gen_shri_i64(shift, vb, 29);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_srlow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+/*
+ * SRLOW (immediate shift): as tcg_gen_srlow_i64 but the shift count is the
+ * 8-bit immediate disp8.  va/vc again carry register indices for the helper.
+ */
+static void tcg_gen_srlowi_i64(int ra, int rc, int disp8)
+{
+    TCGv va, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    tcg_gen_movi_i64(shift, disp8);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_srlow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+/*
+ * SLLOW (register shift): left-shift counterpart of tcg_gen_srlow_i64.
+ * va/vc hold register indices; shift = bits [36:29] of fr[rb].
+ */
+static void tcg_gen_sllow_i64(int ra, int rc, int rb)
+{
+    TCGv va, vb, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    vb = cpu_fr[rb];
+    tcg_gen_shri_i64(shift, vb, 29);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_sllow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+/*
+ * SLLOW (immediate shift): as tcg_gen_sllow_i64 with the 8-bit immediate
+ * disp8 as the shift count.
+ */
+static void tcg_gen_sllowi_i64(int ra, int rc, int disp8)
+{
+    TCGv va, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    tcg_gen_movi_i64(shift, disp8);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_sllow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+/*
+ * VSTW_UH: store the "upper half" of a vector of 32-bit lanes at an
+ * unaligned address, implemented as a read-modify-write of the enclosing
+ * aligned 32-byte region: load the existing 4-byte slots, overwrite the
+ * slots at or above the boundary with lanes from fr[t0 + lane*32], then
+ * store the merged slots back.  t1 (the address) is clobbered.
+ * Odd-indexed slots live in the high 32 bits of a lane, hence the
+ * shli/shri-by-32 juggling.
+ */
+static void gen_qemu_vstw_uh(int t0, TCGv t1, int memidx)
+{
+    TCGv byte4_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[8];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    tmp[4] = tcg_temp_new();
+    tmp[5] = tcg_temp_new();
+    tmp[6] = tcg_temp_new();
+    tmp[7] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte4_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte4_len, t1, 2);
+    tcg_gen_andi_i64(byte4_len, byte4_len, 0x7UL);
+    tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_andi_i64(addr_start, t1, ~0x1fUL);
+    tcg_gen_mov_i64(addr_end, t1);
+    for (i = 7; i >= 0; i--) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_subi_i64(t1, t1, 4);
+        /* NOTE(review): 'ti' is written here but never read in this loop —
+         * looks dead (the other vst*_ul/_uh variants omit it). */
+        tcg_gen_movi_i64(ti, i);
+        if (i % 2)
+            tcg_gen_shli_i64(tmp[i], tmp[i], 32);
+    }
+    tcg_gen_subfi_i64(byte4_len, 8, byte4_len);
+
+    for (i = 0; i < 8; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte4_len, cpu_fr[t0 + (i / 2)*32], tmp[i]);
+        if (i % 2)
+            tcg_gen_shri_i64(tmp[i], tmp[i], 32);
+        else
+            tcg_gen_andi_i64(tmp[i], tmp[i], 0xffffffffUL);
+    }
+
+    tcg_gen_subi_i64(addr_end, addr_end, 32);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_addi_i64(addr_end, addr_end, 4);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte4_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+    tcg_temp_free(tmp[4]);
+    tcg_temp_free(tmp[5]);
+    tcg_temp_free(tmp[6]);
+    tcg_temp_free(tmp[7]);
+}
+
+/*
+ * VSTW_UL: "lower half" counterpart of gen_qemu_vstw_uh — read-modify-write
+ * of the 32-byte region starting at the (4-byte-aligned) address, replacing
+ * the 4-byte slots below the boundary with lanes of fr[t0 + lane*32].
+ * t1 (the address) is clobbered.
+ */
+static void gen_qemu_vstw_ul(int t0, TCGv t1, int memidx)
+{
+    TCGv byte4_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[8];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    tmp[4] = tcg_temp_new();
+    tmp[5] = tcg_temp_new();
+    tmp[6] = tcg_temp_new();
+    tmp[7] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte4_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte4_len, t1, 2);
+    tcg_gen_andi_i64(byte4_len, byte4_len, 0x7UL);
+    tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_addi_i64(addr_end, addr_start, 24);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_addi_i64(t1, t1, 4);
+        if (i % 2)
+            tcg_gen_shli_i64(tmp[i], tmp[i], 32);
+    }
+    tcg_gen_subfi_i64(byte4_len, 8, byte4_len);
+
+    for (i = 0; i < 8; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte4_len, cpu_fr[t0 + (i/2)*32], tmp[i]);
+        if (i % 2)
+            tcg_gen_shri_i64(tmp[i], tmp[i], 32);
+        else
+            tcg_gen_andi_i64(tmp[i], tmp[i], 0xffffffffUL);
+    }
+
+    tcg_gen_addi_i64(addr_start, addr_start, 32);
+    for (i = 7; i >= 0; i--) {
+        tcg_gen_subi_i64(addr_start, addr_start, 4);
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte4_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+    tcg_temp_free(tmp[4]);
+    tcg_temp_free(tmp[5]);
+    tcg_temp_free(tmp[6]);
+    tcg_temp_free(tmp[7]);
+}
+
+/*
+ * VSTS_UH: unaligned upper-half store of a vector of single-precision
+ * values.  Each lane of fr[t0 + lane*32] is first repacked from the
+ * register's internal layout into memory format (sign/exponent bits from
+ * bit 62 down, mantissa from bit 29 — presumably the FP register keeps
+ * singles in an expanded form; confirm against cpu.h), then merged into
+ * the enclosing aligned 16-byte region read-modify-write style.
+ * t1 (the address) is clobbered.
+ */
+static void gen_qemu_vsts_uh(int t0, TCGv t1, int memidx)
+{
+    TCGv byte4_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ftmp;
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ti = tcg_temp_new();
+    ftmp = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte4_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte4_len, t1, 2);
+    tcg_gen_andi_i64(byte4_len, byte4_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_andi_i64(addr_start, t1, ~0xfUL);
+    tcg_gen_mov_i64(addr_end, t1);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_subi_i64(t1, t1, 4);
+    }
+    tcg_gen_subfi_i64(byte4_len, 4, byte4_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_shri_i64(ti, cpu_fr[t0 + i * 32], 62);
+        tcg_gen_shli_i64(ti, ti, 30);
+        tcg_gen_shri_i64(ftmp, cpu_fr[t0 + i * 32], 29);
+        tcg_gen_andi_i64(ftmp, ftmp, 0x3fffffffUL);
+        tcg_gen_or_i64(ftmp, ftmp, ti);
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte4_len, ftmp, tmp[i]);
+    }
+
+    tcg_gen_subi_i64(addr_end, addr_end, 16);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_addi_i64(addr_end, addr_end, 4);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(ftmp);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte4_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+/*
+ * VSTS_UL: lower-half counterpart of gen_qemu_vsts_uh.  Same register-to-
+ * memory single-precision repacking, merging the 4-byte slots *below* the
+ * unaligned boundary into the 16-byte region starting at the address.
+ * t1 (the address) is clobbered.
+ */
+static void gen_qemu_vsts_ul(int t0, TCGv t1, int memidx)
+{
+    TCGv byte4_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ftmp;
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ftmp = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte4_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte4_len, t1, 2);
+    tcg_gen_andi_i64(byte4_len, byte4_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_addi_i64(addr_end, addr_start, 12);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_addi_i64(t1, t1, 4);
+    }
+    tcg_gen_subfi_i64(byte4_len, 4, byte4_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_shri_i64(ti, cpu_fr[t0 + i * 32], 62);
+        tcg_gen_shli_i64(ti, ti, 30);
+        tcg_gen_shri_i64(ftmp, cpu_fr[t0 + i * 32], 29);
+        tcg_gen_andi_i64(ftmp, ftmp, 0x3fffffffUL);
+        tcg_gen_or_i64(ftmp, ftmp, ti);
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte4_len, ftmp, tmp[i]);
+    }
+
+    tcg_gen_addi_i64(addr_start, addr_start, 16);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_subi_i64(addr_start, addr_start, 4);
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte4_len);
+    tcg_temp_free(ftmp);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+/*
+ * VSTD_UH: unaligned upper-half store of a vector of 64-bit doubles —
+ * read-modify-write of the enclosing aligned 32-byte region, replacing the
+ * 8-byte slots at or above the boundary with fr[t0 + lane*32].
+ * t1 (the address) is clobbered.
+ */
+static void gen_qemu_vstd_uh(int t0, TCGv t1, int memidx)
+{
+    TCGv byte8_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte8_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte8_len, t1, 3);
+    tcg_gen_andi_i64(byte8_len, byte8_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x7UL); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_andi_i64(addr_start, t1, ~0x1fUL);
+    tcg_gen_mov_i64(addr_end, t1);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEQ);
+        tcg_gen_subi_i64(t1, t1, 8);
+    }
+    tcg_gen_subfi_i64(byte8_len, 4, byte8_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte8_len, cpu_fr[t0 + i*32], tmp[i]);
+    }
+
+    tcg_gen_subi_i64(addr_end, addr_end, 32);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEQ);
+        tcg_gen_addi_i64(addr_end, addr_end, 8);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte8_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+/*
+ * VSTD_UL: lower-half counterpart of gen_qemu_vstd_uh — merge the 8-byte
+ * slots below the unaligned boundary into the 32-byte region starting at
+ * the (8-byte-aligned) address.  t1 (the address) is clobbered.
+ */
+static void gen_qemu_vstd_ul(int t0, TCGv t1, int memidx)
+{
+    TCGv byte8_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte8_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte8_len, t1, 3);
+    tcg_gen_andi_i64(byte8_len, byte8_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x7UL); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte4_len * 4 */
+    tcg_gen_addi_i64(addr_end, addr_start, 24);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEQ);
+        tcg_gen_addi_i64(t1, t1, 8);
+    }
+    tcg_gen_subfi_i64(byte8_len, 4, byte8_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte8_len, cpu_fr[t0 + i*32], tmp[i]);
+    }
+
+    tcg_gen_addi_i64(addr_start, addr_start, 32);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_subi_i64(addr_start, addr_start, 8);
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEQ);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte8_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+/*
+ * VCPYS: per-lane copy-sign over the four 64-bit vector elements
+ * (lane n of register r lives at cpu_fr[r + n*32]): result = sign bit
+ * (bit 63) of ra's lane combined with the low 63 bits of rb's lane.
+ */
+static void tcg_gen_vcpys_i64(int ra, int rb, int rc)
+{
+    int i;
+    TCGv tmp64 = tcg_temp_new();
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 63);
+        tcg_gen_shli_i64(tmp64, tmp64, 63);
+        tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x7fffffffffffffffUL);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]);
+    }
+    tcg_temp_free(tmp64);
+}
+
+/*
+ * VCPYSE: per-lane copy of sign + exponent — the top 12 bits (63..52) come
+ * from ra's lane, the 52-bit mantissa from rb's lane.
+ */
+static void tcg_gen_vcpyse_i64(int ra, int rb, int rc)
+{
+    int i;
+
+    TCGv tmp64 = tcg_temp_new();
+
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 52);
+        tcg_gen_shli_i64(tmp64, tmp64, 52);
+        tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x000fffffffffffffUL);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]);
+    }
+    tcg_temp_free(tmp64);
+}
+
+/*
+ * VCPYSN: per-lane copy-sign-negate — like VCPYS but the sign bit taken
+ * from ra's lane is inverted before being combined with rb's magnitude.
+ */
+static void tcg_gen_vcpysn_i64(int ra, int rb, int rc)
+{
+    int i;
+    TCGv tmp64 = tcg_temp_new();
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 63);
+        tcg_gen_not_i64(tmp64, tmp64);
+        tcg_gen_shli_i64(tmp64, tmp64, 63);
+        tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x7fffffffffffffffUL);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]);
+    }
+    tcg_temp_free(tmp64);
+}
+
+/*
+ * VLOGZZ: vector logic with an 8-bit truth table.  The table is
+ * zz = ((opc & 3) << 6) | fn6; the three source register indices are
+ * packed into 'args' and the destination index into 'vd' — the helper
+ * does all the work on the FP register file.
+ */
+static void tcg_gen_vlogzz_i64(DisasContext *ctx, int opc, int ra, int rb,
+                               int rc, int rd, int fn6)
+{
+    TCGv zz;
+    TCGv args, vd;
+    zz = tcg_const_i64(((opc & 0x3) << 6) | fn6);
+    args = tcg_const_i64((ra << 16) | (rb << 8) | rc);
+    vd = tcg_const_i64(rd);
+
+    gen_helper_vlogzz(cpu_env, args, vd, zz);
+
+    tcg_temp_free(vd);
+    tcg_temp_free(args);
+    tcg_temp_free(zz);
+}
+
+/*
+ * VCMPxxW (register form): compare the eight 32-bit sub-lanes of ra and rb
+ * (low and high half of each of the four 64-bit elements), writing each
+ * 0/1 result into the corresponding sub-lane of rc.
+ * NOTE(review): '(cond >> 1) & 1' is used to choose signed vs unsigned
+ * 32->64 extension — this relies on the numeric encoding of TCGCond;
+ * confirm it matches the conditions passed by translate_one.
+ */
+static void gen_qemu_vcmpxxw_i64(TCGCond cond, int ra, int rb, int rc)
+{
+    TCGv va, vb, vc, tmp64;
+    int i;
+
+    va = tcg_temp_new();
+    vb = tcg_temp_new();
+    vc = tcg_temp_new();
+    tmp64 = tcg_temp_new();
+
+    for (i = 0; i < 128; i += 32) {
+        if ((cond >> 1) & 1) {
+            tcg_gen_ext32s_i64(va, cpu_fr[ra + i]);
+            tcg_gen_ext32s_i64(vb, cpu_fr[rb + i]);
+        } else {
+            tcg_gen_ext32u_i64(va, cpu_fr[ra + i]);
+            tcg_gen_ext32u_i64(vb, cpu_fr[rb + i]);
+        }
+        tcg_gen_setcond_i64(cond, vc, va, vb);
+        tcg_gen_mov_i64(tmp64, vc);
+
+        tcg_gen_shri_i64(va, cpu_fr[ra + i], 32);
+        tcg_gen_shri_i64(vb, cpu_fr[rb + i], 32);
+        if ((cond >> 1) & 1) {
+            tcg_gen_ext32s_i64(va, va);
+            tcg_gen_ext32s_i64(vb, vb);
+        } else {
+            tcg_gen_ext32u_i64(va, va);
+            tcg_gen_ext32u_i64(vb, vb);
+        }
+        tcg_gen_setcond_i64(cond, vc, va, vb);
+        tcg_gen_shli_i64(vc, vc, 32);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+    }
+    tcg_temp_free(va);
+    tcg_temp_free(vb);
+    tcg_temp_free(vc);
+    tcg_temp_free(tmp64);
+}
+
+/*
+ * VCMPxxW (immediate form): as gen_qemu_vcmpxxw_i64 but the second operand
+ * is the immediate 'rb' (the raw disp8 value, used unextended for both
+ * halves).  Same TCGCond-encoding caveat for the signedness test.
+ */
+static void gen_qemu_vcmpxxwi_i64(TCGCond cond, int ra, int rb, int rc)
+{
+    TCGv va, vb, vc, tmp64;
+    int i;
+
+    va = tcg_temp_new();
+    vb = tcg_const_i64(rb);
+    vc = tcg_temp_new();
+    tmp64 = tcg_temp_new();
+
+    for (i = 0; i < 128; i += 32) {
+        if ((cond >> 1) & 1) {
+            tcg_gen_ext32s_i64(va, cpu_fr[ra + i]);
+        } else {
+            tcg_gen_ext32u_i64(va, cpu_fr[ra + i]);
+        }
+        tcg_gen_setcond_i64(cond, vc, va, vb);
+        tcg_gen_mov_i64(tmp64, vc);
+
+        tcg_gen_shri_i64(va, cpu_fr[ra + i], 32);
+        if ((cond >> 1) & 1) {
+            tcg_gen_ext32s_i64(va, va);
+        } else {
+            tcg_gen_ext32u_i64(va, va);
+        }
+        tcg_gen_setcond_i64(cond, vc, va, vb);
+        tcg_gen_shli_i64(vc, vc, 32);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+    }
+    tcg_temp_free(va);
+    tcg_temp_free(vb);
+    tcg_temp_free(vc);
+    tcg_temp_free(tmp64);
+}
+
+/*
+ * VSELxxW (register form): per-32-bit-sub-lane select.  For each sub-lane,
+ * if 'cond' holds for ra's (optionally masked) sub-lane versus zero, take
+ * rb's sub-lane, otherwise rc's; the low half is built in tmpd and OR-ed
+ * with the masked high-half result to form rd's lane.
+ */
+static void gen_qemu_vselxxw(TCGCond cond, int ra, int rb, int rc, int rd,
+                             int mask)
+{
+    int i;
+
+    TCGv t0 = tcg_const_i64(0);
+    TCGv tmpa = tcg_temp_new();
+    TCGv tmpb = tcg_temp_new();
+    TCGv tmpc = tcg_temp_new();
+    TCGv tmpd = tcg_temp_new();
+
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_ext32s_i64(tmpa, cpu_fr[ra + i]);
+        tcg_gen_ext32u_i64(tmpb, cpu_fr[rb + i]);
+        tcg_gen_ext32u_i64(tmpc, cpu_fr[rc + i]);
+        if (mask) tcg_gen_andi_i64(tmpa, tmpa, mask);
+        tcg_gen_movcond_i64(cond, tmpd, tmpa, t0, tmpb, tmpc);
+
+        tcg_gen_andi_i64(tmpa, cpu_fr[ra + i], 0xffffffff00000000UL);
+        tcg_gen_andi_i64(tmpb, cpu_fr[rb + i], 0xffffffff00000000UL);
+        tcg_gen_andi_i64(tmpc, cpu_fr[rc + i], 0xffffffff00000000UL);
+        if (mask) tcg_gen_andi_i64(tmpa, tmpa, (uint64_t)mask << 32);
+        tcg_gen_movcond_i64(cond, cpu_fr[rd + i], tmpa, t0, tmpb, tmpc);
+
+        tcg_gen_or_i64(cpu_fr[rd + i], cpu_fr[rd + i], tmpd);
+    }
+
+    tcg_temp_free(t0);
+    tcg_temp_free(tmpa);
+    tcg_temp_free(tmpb);
+    tcg_temp_free(tmpc);
+    tcg_temp_free(tmpd);
+}
+
+/*
+ * VSELxxW (immediate form): as gen_qemu_vselxxw but the "else" operand is
+ * the immediate disp8, pre-positioned in the low (tmpc_0) and high
+ * (tmpc_1) 32-bit sub-lane of a 64-bit value.
+ */
+static void gen_qemu_vselxxwi(TCGCond cond, int ra, int rb, int disp8, int rd,
+                              int mask)
+{
+    int i;
+
+    TCGv t0 = tcg_const_i64(0);
+    TCGv tmpa = tcg_temp_new();
+    TCGv tmpb = tcg_temp_new();
+    TCGv tmpc_0 = tcg_temp_new();
+    TCGv tmpc_1 = tcg_temp_new();
+    TCGv tmpd = tcg_temp_new();
+
+    tcg_gen_movi_i64(tmpc_0, (uint64_t)(((uint64_t)disp8)));
+    tcg_gen_movi_i64(tmpc_1, (uint64_t)(((uint64_t)disp8 << 32)));
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_ext32s_i64(tmpa, cpu_fr[ra + i]);
+        tcg_gen_ext32u_i64(tmpb, cpu_fr[rb + i]);
+        if (mask) tcg_gen_andi_i64(tmpa, tmpa, mask);
+        tcg_gen_movcond_i64(cond, tmpd, tmpa, t0, tmpb, tmpc_0);
+
+        tcg_gen_andi_i64(tmpa, cpu_fr[ra + i], 0xffffffff00000000UL);
+        tcg_gen_andi_i64(tmpb, cpu_fr[rb + i], 0xffffffff00000000UL);
+        if (mask) tcg_gen_andi_i64(tmpa, tmpa, (uint64_t)mask << 32);
+        tcg_gen_movcond_i64(cond, cpu_fr[rd + i], tmpa, t0, tmpb, tmpc_1);
+
+        tcg_gen_or_i64(cpu_fr[rd + i], cpu_fr[rd + i], tmpd);
+    }
+
+    tcg_temp_free(t0);
+    tcg_temp_free(tmpa);
+    tcg_temp_free(tmpb);
+    tcg_temp_free(tmpc_0);
+    tcg_temp_free(tmpc_1);
+    tcg_temp_free(tmpd);
+}
+
+DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn,
+ CPUState *cpu)
+{
+ int32_t disp5, disp8, disp12, disp13, disp16, disp21, disp26 __attribute__((unused));
+ uint8_t opc, ra, rb, rc, rd;
+ uint16_t fn3, fn4, fn6, fn8, fn11;
+ int32_t i;
+ TCGv va, vb, vc, vd;
+ TCGv_i32 tmp32;
+ TCGv_i64 tmp64, tmp64_0, tmp64_1, shift;
+ TCGv_i32 tmpa, tmpb, tmpc;
+ DisasJumpType ret;
+ DisasContext* ctx = container_of(dcbase, DisasContext, base);
+
+ opc = extract32(insn, 26, 6);
+ ra = extract32(insn, 21, 5);
+ rb = extract32(insn, 16, 5);
+ rc = extract32(insn, 0, 5);
+ rd = extract32(insn, 5, 5);
+
+ fn3 = extract32(insn, 10, 3);
+ fn6 = extract32(insn, 10, 6);
+ fn4 = extract32(insn, 12, 4);
+ fn8 = extract32(insn, 5, 8);
+ fn11 = extract32(insn, 5, 11);
+
+ disp5 = extract32(insn, 5, 5);
+ disp8 = extract32(insn, 13, 8);
+ disp12 = sextract32(insn, 0, 12);
+ disp13 = sextract32(insn, 13, 13);
+ disp16 = sextract32(insn, 0, 16);
+ disp21 = sextract32(insn, 0, 21);
+ disp26 = sextract32(insn, 0, 26);
+
+ ret = DISAS_NEXT;
+ insn_profile(ctx, insn);
+
+ switch (opc) {
+ case 0x00:
+ /* SYS_CALL */
+ ret = gen_sys_call(ctx, insn & 0x1ffffff);
+ break;
+ case 0x01:
+ /* CALL */
+ case 0x02:
+ /* RET */
+ case 0x03:
+ /* JMP */
+ vb = load_gir(ctx, rb);
+ tcg_gen_addi_i64(cpu_pc, vb, ctx->base.pc_next & 0x3);
+ if (ra != 31) {
+ tcg_gen_movi_i64(load_gir(ctx, ra), ctx->base.pc_next & (~3UL));
+ }
+ ret = DISAS_PC_UPDATED;
+ break;
+ case 0x04:
+ /* BR */
+ case 0x05:
+ /* BSR */
+ ret = gen_bdirect(ctx, ra, disp21);
+ break;
+ case 0x06:
+ switch (disp16) {
+ case 0x0000:
+ /* MEMB */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ break;
+ case 0x0001:
+ /* IMEMB */
+ /* No achievement in Qemu*/
+ break;
+ case 0x0020:
+ /* RTC */
+ if (disp16 && unlikely(ra == 31)) break;
+ va = load_gir(ctx, ra);
+ gen_helper_rtc(va);
+ break;
+ case 0x0040:
+ /* RCID */
+ if (disp16 && unlikely(ra == 31)) break;
+ va = load_gir(ctx, ra);
+ read_csr(0xc4, va);
+ break;
+ case 0x0080:
+ /* HALT */
+#ifndef CONFIG_USER_ONLY
+ {
+ tmp32 = tcg_const_i32(1);
+ tcg_gen_st_i32(
+ tmp32, cpu_env,
+ -offsetof(SW64CPU, env) + offsetof(CPUState, halted));
+ tcg_temp_free_i32(tmp32);
+ }
+ ret = gen_excp(ctx, EXCP_HALTED, 0);
+#endif
+ break;
+ case 0x1000:
+ /* RD_F */
+ if (disp16 && unlikely(ra == 31)) break;
+ va = load_gir(ctx, ra);
+ tcg_gen_mov_i64(va, cpu_lock_success);
+ break;
+ case 0x1020:
+ /* WR_F */
+ if (disp16 && unlikely(ra == 31)) break;
+ va = load_gir(ctx, ra);
+ tcg_gen_andi_i64(cpu_lock_flag, va, 0x1);
+ break;
+ case 0x1040:
+ /* RTID */
+ if (unlikely(ra == 31)) break;
+ va = load_gir(ctx, ra);
+ read_csr(0xc7, va);
+ break;
+ default:
+ if ((disp16 & 0xFF00) == 0xFE00) {
+ /* PRI_RCSR */
+ if (disp16 && unlikely(ra == 31)) break;
+ va = load_gir(ctx, ra);
+ read_csr(disp16 & 0xff, va);
+ break;
+ }
+ if ((disp16 & 0xFF00) == 0xFF00) {
+ /* PRI_WCSR */
+ va = load_gir(ctx, ra);
+ write_csr(disp16 & 0xff, va, ctx->env);
+ break;
+ }
+ goto do_invalid;
+ }
+ break;
+ case 0x07:
+ /* PRI_RET */
+ va = load_gir(ctx, ra);
+ tcg_gen_mov_i64(cpu_pc, va);
+ gen_helper_cpustate_update(cpu_env, va);
+ ret = DISAS_PC_UPDATED_NOCHAIN;
+ break;
+ case 0x08:
+ switch (fn4) {
+ case 0x0:
+ /* LLDW */
+ gen_load_mem(ctx, &gen_qemu_lldw, ra, rb, disp12, 0, 0);
+ break;
+ case 0x1:
+ /* LLDL */
+ gen_load_mem(ctx, &gen_qemu_lldl, ra, rb, disp12, 0, 0);
+ break;
+ case 0x2:
+ /* LDW_INC */
+ ldx_xxx(ctx, ra, rb, disp12, 0, 1);
+ break;
+ case 0x3:
+ /* LDL_INC */
+ ldx_xxx(ctx, ra, rb, disp12, 1, 1);
+ break;
+ case 0x4:
+ /* LDW_DEC */
+ ldx_xxx(ctx, ra, rb, disp12, 0, -1);
+ break;
+ case 0x5:
+ /* LDL_DEC */
+ ldx_xxx(ctx, ra, rb, disp12, 1, -1);
+ break;
+ case 0x6:
+ /* LDW_SET */
+ ldx_set(ctx, ra, rb, disp12, 0);
+ break;
+ case 0x7:
+ /* LDL_SET */
+ ldx_set(ctx, ra, rb, disp12, 1);
+ break;
+ case 0x8:
+ /* LSTW */
+ ret = gen_store_conditional(ctx, ra, rb, disp12,
+ ctx->mem_idx, MO_LEUL);
+ break;
+ case 0x9:
+ /* LSTL */
+ ret = gen_store_conditional(ctx, ra, rb, disp12,
+ ctx->mem_idx, MO_LEQ);
+ break;
+ case 0xa:
+ /* LDW_NC */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp12, 0,
+ 0);
+ break;
+ case 0xb:
+ /* LDL_NC */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp12, 0, 0);
+ break;
+ case 0xc:
+ /* LDD_NC */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp12, 1, 0);
+ break;
+ case 0xd:
+ /* STW_NC */
+ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp12, 0,
+ 0);
+ break;
+ case 0xe:
+ /* STL_NC */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp12, 0,
+ 0);
+ break;
+ case 0xf:
+ /* STD_NC */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp12, 1,
+ 0);
+ break;
+ default:
+ goto do_invalid;
+ }
+ break;
+ case 0x9:
+ /* LDWE */
+ gen_load_mem_simd(ctx, &gen_qemu_ldwe, ra, rb, disp16, 0);
+ break;
+ case 0x0a:
+ /* LDSE */
+ gen_load_mem_simd(ctx, &gen_qemu_ldse, ra, rb, disp16, 0);
+ break;
+ case 0x0b:
+ /* LDDE */
+ gen_load_mem_simd(ctx, &gen_qemu_ldde, ra, rb, disp16, 0);
+ break;
+ case 0x0c:
+ /* VLDS */
+ gen_load_mem_simd(ctx, &gen_qemu_vlds, ra, rb, disp16, 0);
+ break;
+ case 0x0d:
+ /* VLDD */
+ if (unlikely(ra == 31)) break;
+ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp16, 0);
+ break;
+ case 0x0e:
+ /* VSTS */
+ gen_store_mem_simd(ctx, &gen_qemu_vsts, ra, rb, disp16, 0);
+ break;
+ case 0x0f:
+ /* VSTD */
+ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp16, 0);
+ break;
+ case 0x10:
+ if (unlikely(rc == 31)) break;
+ if (fn11 == 0x70) {
+ /* FIMOVS */
+ va = cpu_fr[ra];
+ vc = load_gir(ctx, rc);
+ tmp32 = tcg_temp_new_i32();
+ gen_helper_s_to_memory(tmp32, va);
+ tcg_gen_ext_i32_i64(vc, tmp32);
+ tcg_temp_free_i32(tmp32);
+ } else if (fn11 == 0x78) {
+ /* FIMOVD */
+ va = cpu_fr[ra];
+ vc = load_gir(ctx, rc);
+ tcg_gen_mov_i64(vc, va);
+ } else {
+ va = load_gir(ctx, ra);
+ vb = load_gir(ctx, rb);
+ vc = load_gir(ctx, rc);
+ cal_with_iregs_2(ctx, vc, va, vb, disp13, fn11);
+ }
+ break;
+ case 0x11:
+ if (unlikely(rc == 31)) break;
+ va = load_gir(ctx, ra);
+ vb = load_gir(ctx, rb);
+ vc = load_gir(ctx, rc);
+ vd = load_gir(ctx, rd);
+ cal_with_iregs_3(ctx, vc, va, vb, vd, fn3);
+ break;
+ case 0x12:
+ if (unlikely(rc == 31)) break;
+ va = load_gir(ctx, ra);
+ vc = load_gir(ctx, rc);
+ cal_with_imm_2(ctx, vc, va, disp8, fn8);
+ break;
+ case 0x13:
+ if (rc == 31) /* Special deal */
+ break;
+ va = load_gir(ctx, ra);
+ vc = load_gir(ctx, rc);
+ vd = load_gir(ctx, rd);
+ cal_with_imm_3(ctx, vc, va, disp8, vd, fn3);
+ break;
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ /* VLOGZZ */
+ tcg_gen_vlogzz_i64(ctx, opc, ra, rb, rd, rc, fn6);
+ break;
+ case 0x18:
+ if (unlikely(rc == 31)) break;
+ cal_with_fregs_2(ctx, rc, ra, rb, fn8);
+ break;
+ case 0x19:
+ if (unlikely(rc == 31)) break;
+ cal_with_fregs_4(ctx, rc, ra, rb, rd, fn6);
+ break;
+ case 0x1A:
+ /* SIMD */
+ if (unlikely(rc == 31)) break;
+ switch (fn8) {
+ case 0x00:
+ /* VADDW */
+ tmp64 = tcg_temp_new();
+ va = tcg_temp_new();
+ vb = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL);
+ tcg_gen_andi_i64(vb, cpu_fr[rb + i], 0xffffffffUL);
+ tcg_gen_add_i64(tmp64, va, vb);
+ tcg_gen_ext32u_i64(tmp64, tmp64);
+ tcg_gen_andi_i64(va, cpu_fr[ra + i],
+ 0xffffffff00000000UL);
+ tcg_gen_andi_i64(vb, cpu_fr[rb + i],
+ 0xffffffff00000000UL);
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64);
+ }
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x20:
+ /* VADDW */
+ tmp64 = tcg_temp_new();
+ va = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL);
+ tcg_gen_addi_i64(tmp64, va, disp8);
+ tcg_gen_ext32u_i64(tmp64, tmp64);
+ tcg_gen_andi_i64(va, cpu_fr[ra + i],
+ 0xffffffff00000000UL);
+ tcg_gen_addi_i64(vc, va, ((uint64_t)disp8 << 32));
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64);
+ }
+ tcg_temp_free(va);
+ tcg_temp_free(vc);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x01:
+ /* VSUBW */
+ tmp64 = tcg_temp_new();
+ va = tcg_temp_new();
+ vb = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL);
+ tcg_gen_andi_i64(vb, cpu_fr[rb + i], 0xffffffffUL);
+ tcg_gen_sub_i64(tmp64, va, vb);
+ tcg_gen_ext32u_i64(tmp64, tmp64);
+ tcg_gen_andi_i64(va, cpu_fr[ra + i],
+ 0xffffffff00000000UL);
+ tcg_gen_andi_i64(vb, cpu_fr[rb + i],
+ 0xffffffff00000000UL);
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64);
+ }
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x21:
+ /* VSUBW */
+ tmp64 = tcg_temp_new();
+ va = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL);
+ tcg_gen_subi_i64(tmp64, va, disp8);
+ tcg_gen_ext32u_i64(tmp64, tmp64);
+ tcg_gen_andi_i64(va, cpu_fr[ra + i],
+ 0xffffffff00000000UL);
+ tcg_gen_subi_i64(vc, va, ((uint64_t)disp8 << 32));
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64);
+ }
+ tcg_temp_free(va);
+ tcg_temp_free(vc);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x02:
+ /* VCMPGEW */
+ tmp64 = tcg_const_i64(0);
+ va = tcg_temp_new();
+ vb = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_ext32s_i64(va, cpu_fr[ra + i]);
+ tcg_gen_ext32s_i64(vb, cpu_fr[rb + i]);
+ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb);
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ tcg_gen_shri_i64(va, cpu_fr[ra + i], 32);
+ tcg_gen_shri_i64(vb, cpu_fr[rb + i], 32);
+ tcg_gen_ext32s_i64(va, va);
+ tcg_gen_ext32s_i64(vb, vb);
+ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb);
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ }
+ tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x22:
+ /* VCMPGEW */
+ tmp64 = tcg_const_i64(0);
+ va = tcg_temp_new();
+ vb = tcg_const_i64(disp8);
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_ext32s_i64(va, cpu_fr[ra + i]);
+ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb);
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ tcg_gen_shri_i64(va, cpu_fr[ra + i], 32);
+ tcg_gen_ext32s_i64(va, va);
+ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb);
+ tcg_gen_or_i64(tmp64, tmp64, vc);
+ }
+ tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x03:
+ /* VCMPEQW */
+ gen_qemu_vcmpxxw_i64(TCG_COND_EQ, ra, rb, rc);
+ break;
+ case 0x23:
+ /* VCMPEQW */
+ gen_qemu_vcmpxxwi_i64(TCG_COND_EQ, ra, disp8, rc);
+ break;
+ case 0x04:
+ /* VCMPLEW */
+ gen_qemu_vcmpxxw_i64(TCG_COND_LE, ra, rb, rc);
+ break;
+ case 0x24:
+ /* VCMPLEW */
+ gen_qemu_vcmpxxwi_i64(TCG_COND_LE, ra, disp8, rc);
+ break;
+ case 0x05:
+ /* VCMPLTW */
+ gen_qemu_vcmpxxw_i64(TCG_COND_LT, ra, rb, rc);
+ break;
+ case 0x25:
+ /* VCMPLTW */
+ gen_qemu_vcmpxxwi_i64(TCG_COND_LT, ra, disp8, rc);
+ break;
+ case 0x06:
+ /* VCMPULEW */
+ gen_qemu_vcmpxxw_i64(TCG_COND_LEU, ra, rb, rc);
+ break;
+ case 0x26:
+ /* VCMPULEW */
+ gen_qemu_vcmpxxwi_i64(TCG_COND_LEU, ra, disp8, rc);
+ break;
+ case 0x07:
+ /* VCMPULTW */
+ gen_qemu_vcmpxxw_i64(TCG_COND_LTU, ra, rb, rc);
+ break;
+ case 0x27:
+ /* VCMPULTW */
+ gen_qemu_vcmpxxwi_i64(TCG_COND_LTU, ra, disp8, rc);
+ break;
+ case 0x08:
+ /* VSLLW */
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_shri_i64(shift, cpu_fr[rb], 29);
+ tcg_gen_andi_i64(shift, shift, 0x1fUL);
+
+ tcg_gen_shl_i64(vc, cpu_fr[ra + i], shift);
+ tcg_gen_ext32u_i64(tmp64, vc);
+
+ tcg_gen_andi_i64(vc, cpu_fr[ra + i],
+ 0xffffffff00000000UL);
+ tcg_gen_shl_i64(vc, vc, shift);
+ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x28:
+ /* VSLLW */
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_movi_i64(shift, disp8 & 0x1fUL);
+
+ tcg_gen_shl_i64(vc, cpu_fr[ra + i], shift);
+ tcg_gen_ext32u_i64(tmp64, vc);
+
+ tcg_gen_andi_i64(vc, cpu_fr[ra + i],
+ 0xffffffff00000000UL);
+ tcg_gen_shl_i64(vc, vc, shift);
+ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x09:
+ /* VSRLW */
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_shri_i64(shift, cpu_fr[rb], 29);
+ tcg_gen_andi_i64(shift, shift, 0x1fUL);
+
+ tcg_gen_ext32u_i64(vc, cpu_fr[ra + i]);
+ tcg_gen_shr_i64(tmp64, vc, shift);
+
+ tcg_gen_shr_i64(vc, cpu_fr[ra + i], shift);
+ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL);
+ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x29:
+ /* VSRLW */
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_movi_i64(shift, disp8 & 0x1fUL);
+
+ tcg_gen_ext32u_i64(vc, cpu_fr[ra + i]);
+ tcg_gen_shr_i64(tmp64, vc, shift);
+
+ tcg_gen_shr_i64(vc, cpu_fr[ra + i], shift);
+ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL);
+ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x0A:
+ /* VSRAW */
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_shri_i64(shift, cpu_fr[rb], 29);
+ tcg_gen_andi_i64(shift, shift, 0x1fUL);
+
+ tcg_gen_ext32s_i64(vc, cpu_fr[ra + i]);
+ tcg_gen_sar_i64(tmp64, vc, shift);
+
+ tcg_gen_sar_i64(vc, cpu_fr[ra + i], shift);
+ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL);
+ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x2A:
+ /* VSRAWI */
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_movi_i64(shift, disp8 & 0x1fUL);
+
+ tcg_gen_ext32s_i64(vc, cpu_fr[ra + i]);
+ tcg_gen_sar_i64(tmp64, vc, shift);
+
+ tcg_gen_sar_i64(vc, cpu_fr[ra + i], shift);
+ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL);
+ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x0B:
+ /* VROLW */
+ tmpa = tcg_temp_new_i32();
+ tmpb = tcg_temp_new_i32();
+ tmpc = tcg_temp_new_i32();
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_shri_i64(shift, cpu_fr[rb], 29);
+ tcg_gen_andi_i64(shift, shift, 0x1fUL);
+
+ tcg_gen_extrl_i64_i32(tmpa, cpu_fr[ra + i]);
+ tcg_gen_extrl_i64_i32(tmpb, shift);
+
+ tcg_gen_rotl_i32(tmpc, tmpa, tmpb);
+ tcg_gen_extu_i32_i64(tmp64, tmpc);
+
+ tcg_gen_extrh_i64_i32(tmpa, cpu_fr[ra + i]);
+ tcg_gen_rotl_i32(tmpc, tmpa, tmpb);
+ tcg_gen_extu_i32_i64(vc, tmpc);
+ tcg_gen_shli_i64(vc, vc, 32);
+
+ tcg_gen_or_i64(cpu_fr[rc + i], vc, tmp64);
+ }
+ tcg_temp_free_i32(tmpa);
+ tcg_temp_free_i32(tmpb);
+ tcg_temp_free_i32(tmpc);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x2B:
+ /* VROLW */
+ tmpa = tcg_temp_new_i32();
+ tmpb = tcg_temp_new_i32();
+ tmpc = tcg_temp_new_i32();
+ tmp64 = tcg_temp_new();
+ shift = tcg_temp_new();
+ vc = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_movi_i64(shift, disp8 & 0x1fUL);
+
+ tcg_gen_extrl_i64_i32(tmpa, cpu_fr[ra + i]);
+ tcg_gen_extrl_i64_i32(tmpb, shift);
+
+ tcg_gen_rotl_i32(tmpc, tmpa, tmpb);
+ tcg_gen_extu_i32_i64(tmp64, tmpc);
+
+ tcg_gen_extrh_i64_i32(tmpa, cpu_fr[ra + i]);
+ tcg_gen_rotl_i32(tmpc, tmpa, tmpb);
+ tcg_gen_extu_i32_i64(vc, tmpc);
+ tcg_gen_shli_i64(vc, vc, 32);
+
+ tcg_gen_or_i64(cpu_fr[rc + i], vc, tmp64);
+ }
+ tcg_temp_free_i32(tmpa);
+ tcg_temp_free_i32(tmpb);
+ tcg_temp_free_i32(tmpc);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(shift);
+ tcg_temp_free(vc);
+ break;
+ case 0x0C:
+ /* SLLOW */
+ tcg_gen_sllow_i64(ra, rc, rb);
+ break;
+ case 0x2C:
+ /* SLLOW */
+ tcg_gen_sllowi_i64(ra, rc, disp8);
+ break;
+ case 0x0D:
+ /* SRLOW */
+ tcg_gen_srlow_i64(ra, rc, rb);
+ break;
+ case 0x2D:
+ /* SRLOW */
+ tcg_gen_srlowi_i64(ra, rc, disp8);
+ break;
+ case 0x0E:
+ /* VADDL */
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_add_i64(cpu_fr[rc + i], cpu_fr[ra + i],
+ cpu_fr[rb + i]);
+ }
+ break;
+ case 0x2E:
+ /* VADDL */
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_addi_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8);
+ }
+ break;
+ case 0x0F:
+ /* VSUBL */
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_sub_i64(cpu_fr[rc + i], cpu_fr[ra + i],
+ cpu_fr[rb + i]);
+ }
+ break;
+ case 0x2F:
+ /* VSUBL */
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_subi_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8);
+ }
+ break;
+ case 0x18:
+ /* CTPOPOW */
+ tmp64 = tcg_const_i64(0);
+ tmp64_0 = tcg_temp_new();
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_ctpop_i64(tmp64_0, cpu_fr[ra + i]);
+ tcg_gen_add_i64(tmp64, tmp64, tmp64_0);
+ }
+ tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(tmp64_0);
+ break;
+ case 0x19:
+ /* CTLZOW */
+ va = tcg_const_i64(ra);
+ gen_helper_ctlzow(cpu_fr[rc], cpu_env, va);
+ tcg_temp_free(va);
+ break;
+ case 0x40:
+ /* VUCADDW */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(rb);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucaddw(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x60:
+ /* VUCADDW */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(disp8);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucaddwi(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x41:
+ /* VUCSUBW */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(rb);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucsubw(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x61:
+ /* VUCSUBW */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(disp8);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucsubwi(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x42:
+ /* VUCADDH */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(rb);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucaddh(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x62:
+ /* VUCADDH */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(disp8);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucaddhi(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x43:
+ /* VUCSUBH */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(rb);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucsubh(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x63:
+ /* VUCSUBH */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(disp8);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucsubhi(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x44:
+ /* VUCADDB */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(rb);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucaddb(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x64:
+ /* VUCADDB */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(disp8);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucaddbi(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x45:
+ /* VUCSUBB */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(rb);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucsubb(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x65:
+ /* VUCSUBB */
+ va = tcg_const_i64(ra);
+ vb = tcg_const_i64(disp8);
+ vc = tcg_const_i64(rc);
+ gen_helper_vucsubbi(cpu_env, va, vb, vc);
+ tcg_temp_free(va);
+ tcg_temp_free(vb);
+ tcg_temp_free(vc);
+ break;
+ case 0x80:
+ /* VADDS */
+ for (i = 0; i < 128; i += 32)
+ gen_fadds(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x81:
+ /* VADDD */
+ for (i = 0; i < 128; i += 32)
+ gen_faddd(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x82:
+ /* VSUBS */
+ for (i = 0; i < 128; i += 32)
+ gen_fsubs(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x83:
+ /* VSUBD */
+ for (i = 0; i < 128; i += 32)
+ gen_fsubd(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x84:
+ /* VMULS */
+ for (i = 0; i < 128; i += 32)
+ gen_fmuls(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x85:
+ /* VMULD */
+ for (i = 0; i < 128; i += 32)
+ gen_fmuld(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x86:
+ /* VDIVS */
+ for (i = 0; i < 128; i += 32)
+ gen_fdivs(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x87:
+ /* VDIVD */
+ for (i = 0; i < 128; i += 32)
+ gen_fdivd(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x88:
+ /* VSQRTS */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fsqrts(cpu_fr[rc + i], cpu_env,
+ cpu_fr[rb + i]);
+ break;
+ case 0x89:
+ /* VSQRTD */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fsqrt(cpu_fr[rc + i], cpu_env,
+ cpu_fr[rb + i]);
+ break;
+ case 0x8C:
+ /* VFCMPEQ */
+ for (i = 0; i < 128; i += 32)
+ gen_fcmpeq(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x8D:
+ /* VFCMPLE */
+ for (i = 0; i < 128; i += 32)
+ gen_fcmple(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x8E:
+ /* VFCMPLT */
+ for (i = 0; i < 128; i += 32)
+ gen_fcmplt(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x8F:
+ /* VFCMPUN */
+ for (i = 0; i < 128; i += 32)
+ gen_fcmpun(ctx, ra + i, rb + i, rc + i);
+ break;
+ case 0x90:
+ /* VCPYS */
+ tcg_gen_vcpys_i64(ra, rb, rc);
+ break;
+ case 0x91:
+ /* VCPYSE */
+ tcg_gen_vcpyse_i64(ra, rb, rc);
+ break;
+ case 0x92:
+ /* VCPYSN */
+ tcg_gen_vcpysn_i64(ra, rb, rc);
+ break;
+ case 0x93:
+ /* VSUMS */
+ gen_fadds(ctx, ra, ra + 32, rc);
+ gen_fadds(ctx, rc, ra + 64, rc);
+ gen_fadds(ctx, rc, ra + 96, rc);
+ break;
+ case 0x94:
+ /* VSUMD */
+ gen_faddd(ctx, ra, ra + 32, rc);
+ gen_faddd(ctx, rc, ra + 64, rc);
+ gen_faddd(ctx, rc, ra + 96, rc);
+ break;
+ default:
+ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn8);
+ ret = gen_invalid(ctx);
+ break;
+ }
+ break;
+ case 0x1B:
+ /* SIMD */
+ if (unlikely(rc == 31)) break;
+ switch (fn6) {
+ case 0x00:
+ /* VMAS */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fmas(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i],
+ cpu_fr[rb + i], cpu_fr[rd + i]);
+ break;
+ case 0x01:
+ /* VMAD */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fmad(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i],
+ cpu_fr[rb + i], cpu_fr[rd + i]);
+ break;
+ case 0x02:
+ /* VMSS */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fmss(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i],
+ cpu_fr[rb + i], cpu_fr[rd + i]);
+ break;
+ case 0x03:
+ /* VMSD */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fmsd(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i],
+ cpu_fr[rb + i], cpu_fr[rd + i]);
+ break;
+ case 0x04:
+ /* VNMAS */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fnmas(cpu_fr[rc + i], cpu_env,
+ cpu_fr[ra + i], cpu_fr[rb + i],
+ cpu_fr[rd + i]);
+ break;
+ case 0x05:
+ /* VNMAD */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fnmad(cpu_fr[rc + i], cpu_env,
+ cpu_fr[ra + i], cpu_fr[rb + i],
+ cpu_fr[rd + i]);
+ break;
+ case 0x06:
+ /* VNMSS */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fnmss(cpu_fr[rc + i], cpu_env,
+ cpu_fr[ra + i], cpu_fr[rb + i],
+ cpu_fr[rd + i]);
+ break;
+ case 0x07:
+ /* VNMSD */
+ for (i = 0; i < 128; i += 32)
+ gen_helper_fnmsd(cpu_fr[rc + i], cpu_env,
+ cpu_fr[ra + i], cpu_fr[rb + i],
+ cpu_fr[rd + i]);
+ break;
+ case 0x10:
+ /* VFSELEQ */
+ tmp64 = tcg_temp_new();
+ tmp64_0 = tcg_const_i64(0);
+ for (i = 0; i < 128; i += 32) {
+ gen_helper_fcmpeq(tmp64, cpu_env, cpu_fr[ra + i],
+ tmp64_0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64,
+ tmp64_0, cpu_fr[rd + i],
+ cpu_fr[rb + i]);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(tmp64_0);
+ break;
+ case 0x12:
+ /* VFSELLT */
+ tmp64 = tcg_temp_new();
+ tmp64_0 = tcg_const_i64(0);
+ tmp64_1 = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_andi_i64(tmp64, cpu_fr[ra + i],
+ 0x7fffffffffffffffUL);
+ tcg_gen_setcond_i64(TCG_COND_NE, tmp64, tmp64,
+ tmp64_0);
+ tcg_gen_shri_i64(tmp64_1, cpu_fr[ra +i], 63);
+ tcg_gen_and_i64(tmp64, tmp64_1, tmp64);
+ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64,
+ tmp64_0, cpu_fr[rd + i],
+ cpu_fr[rb + i]);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(tmp64_0);
+ tcg_temp_free(tmp64_1);
+ break;
+ case 0x13:
+ /* VFSELLE */
+ tmp64 = tcg_temp_new();
+ tmp64_0 = tcg_const_i64(0);
+ tmp64_1 = tcg_temp_new();
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_andi_i64(tmp64, cpu_fr[ra + i],
+ 0x7fffffffffffffffUL);
+ tcg_gen_setcond_i64(TCG_COND_EQ, tmp64, tmp64,
+ tmp64_0);
+ tcg_gen_shri_i64(tmp64_1, cpu_fr[ra + i], 63);
+ tcg_gen_or_i64(tmp64, tmp64_1, tmp64);
+ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64,
+ tmp64_0, cpu_fr[rd + i],
+ cpu_fr[rb + i]);
+ }
+ tcg_temp_free(tmp64);
+ tcg_temp_free(tmp64_0);
+ tcg_temp_free(tmp64_1);
+ break;
+ case 0x18:
+ /* VSELEQW */
+ gen_qemu_vselxxw(TCG_COND_EQ, ra, rb, rd, rc, 0);
+ break;
+ case 0x38:
+ /* VSELEQW */
+ gen_qemu_vselxxwi(TCG_COND_EQ, ra, rb, disp5, rc, 0);
+ break;
+ case 0x19:
+ /* VSELLBCW */
+ gen_qemu_vselxxw(TCG_COND_EQ, ra, rb, rd, rc, 1);
+ break;
+ case 0x39:
+ /* VSELLBCW */
+ gen_qemu_vselxxwi(TCG_COND_EQ, ra, rb, disp5, rc, 1);
+ break;
+ case 0x1A:
+ /* VSELLTW */
+ gen_qemu_vselxxw(TCG_COND_LT, ra, rb, rd, rc, 0);
+ break;
+ case 0x3A:
+ /* VSELLTW */
+ gen_qemu_vselxxwi(TCG_COND_LT, ra, rb, disp5, rc, 0);
+ break;
+ case 0x1B:
+ /* VSELLEW */
+ gen_qemu_vselxxw(TCG_COND_LE, ra, rb, rd, rc, 0);
+ break;
+ case 0x3B:
+ /* VSELLEW */
+ gen_qemu_vselxxwi(TCG_COND_LE, ra, rb, disp5, rc, 0);
+ break;
+ case 0x20:
+ /* VINSW */
+ if (disp5 > 7) break;
+ tmp64 = tcg_temp_new();
+ tmp32 = tcg_temp_new_i32();
+ gen_helper_s_to_memory(tmp32, cpu_fr[ra]);
+ tcg_gen_extu_i32_i64(tmp64, tmp32);
+ tcg_gen_shli_i64(tmp64, tmp64, (disp5 % 2) * 32);
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[rb + i]);
+ }
+ if (disp5 % 2) {
+ tcg_gen_andi_i64(cpu_fr[rc + (disp5 / 2) * 32],
+ cpu_fr[rc + (disp5 / 2) * 32],
+ 0xffffffffUL);
+ } else {
+ tcg_gen_andi_i64(cpu_fr[rc + (disp5 / 2) * 32],
+ cpu_fr[rc + (disp5 / 2) * 32],
+ 0xffffffff00000000UL);
+ }
+ tcg_gen_or_i64(cpu_fr[rc + (disp5 / 2) * 32],
+ cpu_fr[rc + (disp5 / 2) * 32], tmp64);
+ tcg_temp_free(tmp64);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x21:
+ /* VINSF */
+ if (disp5 > 3) break;
+ tmp64 = tcg_temp_new();
+ tcg_gen_mov_i64(tmp64, cpu_fr[ra]);
+
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[rb + i]);
+ }
+ tcg_gen_mov_i64(cpu_fr[rc + disp5 * 32], tmp64);
+ tcg_temp_free(tmp64);
+ break;
+ case 0x22:
+ /* VEXTW */
+ if (disp5 > 7) break;
+ tmp64 = tcg_temp_new();
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_shri_i64(tmp64, cpu_fr[ra + (disp5 / 2) * 32],
+ (disp5 % 2) * 32);
+ tcg_gen_extrl_i64_i32(tmp32, tmp64);
+ gen_helper_memory_to_s(tmp64, tmp32);
+ tcg_gen_mov_i64(cpu_fr[rc], tmp64);
+ tcg_temp_free(tmp64);
+ tcg_temp_free_i32(tmp32);
+ break;
+ case 0x23:
+ /* VEXTF */
+ if (disp5 > 3) break;
+ tcg_gen_mov_i64(cpu_fr[rc], cpu_fr[ra + disp5 * 32]);
+ break;
+ case 0x24:
+ /* VCPYW */
+ tmp64 = tcg_temp_new();
+ tmp64_0 = tcg_temp_new();
+ /* FIXME: for debug
+ tcg_gen_movi_i64(tmp64, ra);
+ gen_helper_v_print(cpu_env, tmp64);
+ */
+ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 29);
+ tcg_gen_andi_i64(tmp64_0, tmp64, 0x3fffffffUL);
+ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 62);
+ tcg_gen_shli_i64(tmp64, tmp64, 30);
+ tcg_gen_or_i64(tmp64_0, tmp64, tmp64_0);
+ tcg_gen_mov_i64(tmp64, tmp64_0);
+ tcg_gen_shli_i64(tmp64, tmp64, 32);
+ tcg_gen_or_i64(tmp64_0, tmp64_0, tmp64);
+ tcg_gen_mov_i64(cpu_fr[rc], tmp64_0);
+ tcg_gen_mov_i64(cpu_fr[rc + 32], cpu_fr[rc]);
+ tcg_gen_mov_i64(cpu_fr[rc + 64], cpu_fr[rc]);
+ tcg_gen_mov_i64(cpu_fr[rc + 96], cpu_fr[rc]);
+ /* FIXME: for debug
+ tcg_gen_movi_i64(tmp64, rb);
+ gen_helper_v_print(cpu_env, tmp64);
+ tcg_gen_movi_i64(tmp64, rc);
+ gen_helper_v_print(cpu_env, tmp64);
+ */
+ tcg_temp_free(tmp64);
+ tcg_temp_free(tmp64_0);
+ break;
+ case 0x25:
+ /* VCPYF */
+ for (i = 0; i < 128; i += 32) {
+ tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[ra]);
+ }
+ break;
+ case 0x26:
+ /* VCONW */
+ tmp64 = tcg_const_i64(ra << 8 | rb);
+ tmp64_0 = tcg_temp_new();
+ vd = tcg_const_i64(rc);
+ tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 2);
+ tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x7ul);
+ gen_helper_vconw(cpu_env, tmp64, vd, tmp64_0);
+ tcg_temp_free(tmp64_0);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(vd);
+ break;
+ case 0x27:
+ /* VSHFW */
+ tmp64 = tcg_const_i64(ra << 8 | rb);
+ vd = tcg_const_i64(rc);
+ gen_helper_vshfw(cpu_env, tmp64, vd, cpu_fr[rd]);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(vd);
+ break;
+ case 0x28:
+ /* VCONS */
+ tmp64 = tcg_const_i64(ra << 8 | rb);
+ tmp64_0 = tcg_temp_new();
+ vd = tcg_const_i64(rc);
+ tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 2);
+ tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x3ul);
+ gen_helper_vcond(cpu_env, tmp64, vd, tmp64_0);
+ tcg_temp_free(tmp64_0);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(vd);
+ break;
+ case 0x29:
+ /* FIXME: VCOND maybe it's wrong in the instruction book
+ * that there are no temp. */
+ tmp64 = tcg_const_i64(ra << 8 | rb);
+ tmp64_0 = tcg_temp_new();
+ vd = tcg_const_i64(rc);
+ tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 3);
+ tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x3ul);
+ gen_helper_vcond(cpu_env, tmp64, vd, tmp64_0);
+ tcg_temp_free(tmp64_0);
+ tcg_temp_free(tmp64);
+ tcg_temp_free(vd);
+ break;
+ default:
+ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn6);
+ ret = gen_invalid(ctx);
+ break;
+ }
+ break;
+ case 0x1C:
+ switch (fn4) {
+ case 0x0:
+ /* VLDW_U */
+ if (unlikely(ra == 31)) break;
+ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12,
+ ~0x1fUL);
+ break;
+ case 0x1:
+ /* VSTW_U */
+ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12,
+ ~0x1fUL);
+ break;
+ case 0x2:
+ /* VLDS_U */
+ if (unlikely(ra == 31)) break;
+ gen_load_mem_simd(ctx, &gen_qemu_vlds, ra, rb, disp12,
+ ~0xfUL);
+ break;
+ case 0x3:
+ /* VSTS_U */
+ gen_store_mem_simd(ctx, &gen_qemu_vsts, ra, rb, disp12,
+ ~0xfUL);
+ break;
+ case 0x4:
+ /* VLDD_U */
+ if (unlikely(ra == 31)) break;
+ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12,
+ ~0x1fUL);
+ break;
+ case 0x5:
+ /* VSTD_U */
+ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12,
+ ~0x1fUL);
+ break;
+ case 0x8:
+ /* VSTW_UL */
+ gen_store_mem_simd(ctx, &gen_qemu_vstw_ul, ra, rb, disp12,
+ 0);
+ break;
+ case 0x9:
+ /* VSTW_UH */
+ gen_store_mem_simd(ctx, &gen_qemu_vstw_uh, ra, rb, disp12,
+ 0);
+ break;
+ case 0xa:
+ /* VSTS_UL */
+ gen_store_mem_simd(ctx, &gen_qemu_vsts_ul, ra, rb, disp12,
+ 0);
+ break;
+ case 0xb:
+ /* VSTS_UH */
+ gen_store_mem_simd(ctx, &gen_qemu_vsts_uh, ra, rb, disp12,
+ 0);
+ break;
+ case 0xc:
+ /* VSTD_UL */
+ gen_store_mem_simd(ctx, &gen_qemu_vstd_ul, ra, rb, disp12,
+ 0);
+ break;
+ case 0xd:
+ /* VSTD_UH */
+ gen_store_mem_simd(ctx, &gen_qemu_vstd_uh, ra, rb, disp12,
+ 0);
+ break;
+ case 0xe:
+ /* VLDD_NC */
+ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, 0);
+ break;
+ case 0xf:
+ /* VSTD_NC */
+ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, 0);
+ break;
+ default:
+ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn4);
+ ret = gen_invalid(ctx);
+ break;
+ }
+ break;
+ case 0x20:
+ /* LDBU */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+ break;
+ case 0x21:
+ /* LDHU */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+ break;
+ case 0x22:
+ /* LDW */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
+ break;
+ case 0x23:
+ /* LDL */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
+ break;
+ case 0x24:
+ /* LDL_U */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
+ break;
+ case 0x25:
+ /* PRI_LD */
+#ifndef CONFIG_USER_ONLY
+ if ((insn >> 12) & 1) {
+ gen_load_mem(ctx, &gen_qemu_pri_ldl, ra, rb, disp12, 0, 1);
+ } else {
+ gen_load_mem(ctx, &gen_qemu_pri_ldw, ra, rb, disp12, 0, 1);
+ }
+#endif
+ break;
+ case 0x26:
+ /* FLDS */
+ gen_load_mem(ctx, &gen_qemu_flds, ra, rb, disp16, 1, 0);
+ break;
+ case 0x27:
+ /* FLDD */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
+ break;
+ case 0x28:
+ /* STB */
+ gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
+ break;
+ case 0x29:
+ /* STH */
+ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2a:
+ /* STW */
+ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2b:
+ /* STL */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2c:
+ /* STL_U */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
+ break;
+ case 0x2d:
+ /* PRI_ST */
+#ifndef CONFIG_USER_ONLY
+ if ((insn >> 12) & 1) {
+ gen_store_mem(ctx, &gen_qemu_pri_stl, ra, rb, disp12, 0, 1);
+ } else {
+ gen_store_mem(ctx, &gen_qemu_pri_stw, ra, rb, disp12, 0, 1);
+ }
+#endif
+ break;
+ case 0x2e:
+ /* FSTS */
+ gen_store_mem(ctx, &gen_qemu_fsts, ra, rb, disp16, 1, 0);
+ break;
+ case 0x2f:
+ /* FSTD */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
+ break;
+ case 0x30:
+ /* BEQ */
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, (uint64_t)-1);
+ break;
+ case 0x31:
+ /* BNE */
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, (uint64_t)-1);
+ break;
+ case 0x32:
+ /* BLT */
+ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, (uint64_t)-1);
+ break;
+ case 0x33:
+ /* BLE */
+ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, (uint64_t)-1);
+ break;
+ case 0x34:
+ /* BGT */
+ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, (uint64_t)-1);
+ break;
+ case 0x35:
+ /* BGE */
+ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, (uint64_t)-1);
+ break;
+ case 0x36:
+ /* BLBC */
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
+ break;
+ case 0x37:
+ /* BLBS */
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
+ break;
+ case 0x38:
+ /* FBEQ */
+ ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
+ break;
+ case 0x39:
+ /* FBNE */
+ ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
+ break;
+ case 0x3a:
+ /* FBLT */
+ ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
+ break;
+ case 0x3b:
+ /* FBLE */
+ ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
+ break;
+ case 0x3c:
+ /* FBGT */
+ ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
+ break;
+ case 0x3d:
+ /* FBGE */
+ ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
+ break;
+ case 0x3f:
+ /* LDIH */
+ disp16 = ((uint32_t)disp16) << 16;
+ if (ra == 31) break;
+ va = load_gir(ctx, ra);
+ if (rb == 31) {
+ tcg_gen_movi_i64(va, disp16);
+ } else {
+ tcg_gen_addi_i64(va, load_gir(ctx, rb), (int64_t)disp16);
+ }
+ break;
+ case 0x3e:
+ /* LDI */
+ if (ra == 31) break;
+ va = load_gir(ctx, ra);
+ if (rb == 31) {
+ tcg_gen_movi_i64(va, disp16);
+ } else {
+ tcg_gen_addi_i64(va, load_gir(ctx, rb), (int64_t)disp16);
+ }
+ break;
+ do_invalid:
+ default:
+ printf("ILLEGAL BELOW OPC[%x] insn[%08x]\n", opc, insn);
+ ret = gen_invalid(ctx);
+ }
+ return ret;
+}
+static void sw64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) /* per-TB setup: seed DisasContext from tb->flags */
+{
+    DisasContext* ctx = container_of(dcbase, DisasContext, base);
+    CPUSW64State* env = cpu->env_ptr; /*init by instance_initfn*/
+
+    ctx->tbflags = ctx->base.tb->flags; /* cache tb->flags for the whole TB */
+    ctx->mem_idx = cpu_mmu_index(env, false);
+#ifdef CONFIG_USER_ONLY
+    ctx->ir = cpu_std_ir; /* user mode: only the standard integer reg bank exists */
+#else
+    ctx->ir = (ctx->tbflags & ENV_FLAG_HM_MODE ? cpu_hm_ir : cpu_std_ir); /* HM mode selects the hm reg bank */
+#endif
+    ctx->zero = NULL; /* $31/$f31 temp (see translate.h) not allocated yet */
+}
+
+static void sw64_tr_tb_start(DisasContextBase *db, CPUState *cpu) /* no per-TB prologue work needed for SW64 */
+{
+}
+
+static void sw64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) /* record insn boundary for state restore */
+{
+    tcg_gen_insn_start(dcbase->pc_next); /* pc is data[0] consumed by restore_state_to_opc() */
+}
+
+static void sw64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) /* fetch + decode one 4-byte insn */
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    CPUSW64State *env = cpu->env_ptr;
+    uint32_t insn;
+
+    insn = cpu_ldl_code(env, ctx->base.pc_next & (~3UL)); /* force 4-byte-aligned fetch */
+    ctx->env = env;
+    ctx->base.pc_next += 4; /* fixed-width ISA: every insn is 32 bits */
+    ctx->base.is_jmp = ctx->translate_one(dcbase, insn, cpu); /* decoder hook set by init_transops() */
+
+    free_context_temps(ctx);
+    translator_loop_temp_check(&ctx->base);
+}
+
+/* FIXME:Linhainan */
+static void sw64_tr_tb_stop(DisasContextBase* dcbase, CPUState* cpu) { /* emit TB epilogue per exit reason */
+    DisasContext* ctx = container_of(dcbase, DisasContext, base);
+
+    switch (ctx->base.is_jmp) {
+    case DISAS_NORETURN:
+        break; /* an exception path already ended the TB */
+    case DISAS_TOO_MANY:
+        if (use_goto_tb(ctx, ctx->base.pc_next)) { /* direct-chain when the dest TB is reachable */
+            tcg_gen_goto_tb(0);
+            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
+            tcg_gen_exit_tb(ctx->base.tb, 0);
+        }
+        /* FALLTHRU */
+    case DISAS_PC_STALE:
+        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); /* resync guest pc before leaving */
+        /* FALLTHRU */
+    case DISAS_PC_UPDATED:
+        if (!use_exit_tb(ctx)) {
+            tcg_gen_lookup_and_goto_ptr(); /* indirect chaining via TB lookup */
+            break;
+        }
+        /* FALLTHRU */
+    case DISAS_PC_UPDATED_NOCHAIN:
+        if (ctx->base.singlestep_enabled) {
+            /* FIXME: for gdb*/
+            cpu_loop_exit(cpu);
+        } else {
+            tcg_gen_exit_tb(NULL, 0); /* no chaining: return to the main loop */
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void sw64_tr_disas_log(const DisasContextBase* dcbase, CPUState* cpu) { /* dump TB disassembly to qemu log */
+    SW64CPU* sc = SW64_CPU(cpu);
+    qemu_log("IN(%d): %s\n", sc->cid,
+             lookup_symbol(dcbase->pc_first));
+    log_target_disas(cpu, dcbase->pc_first & (~0x3UL), dcbase->tb->size); /* align start pc to 4 bytes */
+}
+
+static void init_transops(CPUState *cpu, DisasContext *dc) /* install the insn decoder for this cpu */
+{
+    dc->translate_one = translate_one; /* base decoder; a th1_translate_one variant also exists (translate.h) */
+}
+
+void restore_state_to_opc(CPUSW64State* env, TranslationBlock* tb,
+                          target_ulong* data) { /* data[0] was recorded by tcg_gen_insn_start() */
+    env->pc = data[0];
+}
+
+static const TranslatorOps sw64_trans_ops = { /* hooks consumed by the generic translator_loop() */
+    .init_disas_context = sw64_tr_init_disas_context,
+    .tb_start           = sw64_tr_tb_start,
+    .insn_start         = sw64_tr_insn_start,
+    .translate_insn     = sw64_tr_translate_insn,
+    .tb_stop            = sw64_tr_tb_stop,
+    .disas_log          = sw64_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState* cpu, TranslationBlock* tb, int max_insns) /* target entry point for TB translation */
+{
+    DisasContext dc;
+    init_transops(cpu, &dc); /* pick the decoder before entering the loop */
+    translator_loop(&sw64_trans_ops, &dc.base, cpu, tb, max_insns);
+}
diff --git a/target/sw64/translate.h b/target/sw64/translate.h
new file mode 100644
index 0000000000..e93df0815e
--- /dev/null
+++ b/target/sw64/translate.h
@@ -0,0 +1,60 @@
+#ifndef SW64_TRANSLATE_H
+#define SW64_TRANSLATE_H
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "sysemu/cpus.h"
+#include "disas/disas.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "trace-tcg.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+
+#define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
+#define DISAS_PC_UPDATED DISAS_TARGET_1
+#define DISAS_PC_STALE DISAS_TARGET_2
+#define DISAS_PC_UPDATED_T DISAS_TOO_MANY
+
+typedef struct DisasContext DisasContext;
+struct DisasContext {
+    DisasContextBase base; /* generic translator state; recovered via container_of() */
+
+    uint32_t tbflags; /* copy of tb->flags for the current TB */
+
+    /* The set of registers active in the current context. */
+    TCGv *ir;
+
+    /* Accel: Temporaries for $31 and $f31 as source and destination. */
+    TCGv zero;
+    int mem_idx; /* mmu index used for guest loads/stores */
+    CPUSW64State *env;
+    DisasJumpType (*translate_one)(DisasContextBase *dcbase, uint32_t insn,
+                                   CPUState *cpu); /* decoder hook, set by init_transops() */
+};
+
+extern TCGv cpu_pc;
+extern TCGv cpu_std_ir[31];
+extern TCGv cpu_fr[128];
+extern TCGv cpu_lock_addr;
+extern TCGv cpu_lock_flag;
+extern TCGv cpu_lock_success;
+#ifdef SW64_FIXLOCK
+extern TCGv cpu_lock_value;
+#endif
+#ifndef CONFIG_USER_ONLY
+extern TCGv cpu_hm_ir[31];
+#endif
+
+DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn,
+ CPUState *cpu);
+DisasJumpType th1_translate_one(DisasContextBase *dcbase, uint32_t insn,
+ CPUState *cpu);
+bool use_exit_tb(DisasContext *ctx);
+bool use_goto_tb(DisasContext *ctx, uint64_t dest);
+void insn_profile(DisasContext *ctx, uint32_t insn);
+extern void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src);
+#endif
diff --git a/tcg/sw64/tcg-target-con-set.h b/tcg/sw64/tcg-target-con-set.h
new file mode 100755
index 0000000000..71fdfdcbef
--- /dev/null
+++ b/tcg/sw64/tcg-target-con-set.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Define SW_64 target-specific constraint sets.
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
+ * Each operand should be a sequence of constraint letters as defined by
+ * tcg-target-con-str.h; the constraint combination is inclusive or.
+ */
+C_O0_I1(r)
+C_O0_I2(lZ, l)
+C_O0_I2(r, rA)
+C_O0_I2(rZ, r)
+C_O0_I2(w, r)
+C_O1_I1(r, l)
+C_O1_I1(r, r)
+C_O1_I1(w, r)
+C_O1_I1(w, w)
+C_O1_I1(w, wr)
+C_O1_I2(r, 0, rZ)
+C_O1_I2(r, r, r)
+C_O1_I2(r, r, rA)
+C_O1_I2(r, r, rAL)
+C_O1_I2(r, r, ri)
+C_O1_I2(r, r, rL)
+C_O1_I2(r, rZ, rZ)
+C_O1_I2(w, 0, w)
+C_O1_I2(w, w, w)
+C_O1_I2(w, w, wN)
+C_O1_I2(w, w, wO)
+C_O1_I2(w, w, wZ)
+C_O1_I3(w, w, w, w)
+C_O1_I4(r, r, rA, rZ, rZ)
+C_O2_I4(r, r, rZ, rZ, rA, rMZ)
+C_O1_I4(r, r, rU, rZ, rZ)
+C_O0_I2(r, rU)
+C_O1_I2(r, r, rU)
diff --git a/tcg/sw64/tcg-target-con-str.h b/tcg/sw64/tcg-target-con-str.h
new file mode 100755
index 0000000000..47edb3837b
--- /dev/null
+++ b/tcg/sw64/tcg-target-con-str.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Define sw_64 target-specific operand constraints.
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * Define constraint letters for register sets:
+ * REGS(letter, register_mask)
+ */
+REGS('r', ALL_GENERAL_REGS)
+REGS('l', ALL_QLDST_REGS)
+REGS('w', ALL_VECTOR_REGS)
+
+/*
+ * Define constraint letters for constants:
+ * CONST(letter, TCG_CT_CONST_* bit set)
+ */
+
+CONST('Z', TCG_CT_CONST_ZERO)
+CONST('A', TCG_CT_CONST_LONG) /* NOTE(review): 'A' and 'L' both map to TCG_CT_CONST_LONG - confirm intended */
+CONST('M', TCG_CT_CONST_MONE)
+CONST('O', TCG_CT_CONST_ORRI)
+CONST('W', TCG_CT_CONST_WORD)
+CONST('L', TCG_CT_CONST_LONG)
+CONST('U', TCG_CT_CONST_U8)
+CONST('S', TCG_CT_CONST_S8)
+
diff --git a/tcg/sw64/tcg-target.c.inc b/tcg/sw64/tcg-target.c.inc
new file mode 100755
index 0000000000..982f159e23
--- /dev/null
+++ b/tcg/sw64/tcg-target.c.inc
@@ -0,0 +1,2109 @@
+/*
+ * Initial TCG Implementation for sw_64
+ *
+ */
+
+#include "../tcg-pool.c.inc"
+#include "qemu/bitops.h"
+
+/* We're going to re-use TCGType in setting of the SF bit, which controls
+ the size of the operation performed. If we know the values match, it
+ makes things much cleaner. */
+QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);
+static const tcg_insn_unit *tb_ret_addr;
+
+#ifdef CONFIG_DEBUG_TCG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { /* debug names; X15 = "fp", X30 = "Xsp" per the SW64 ABI */
+    "X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
+    "X8", "X9", "X10", "X11", "X12", "X13", "X14", "fp",
+    "X16", "X17", "X18", "X19", "X20", "X21", "X22", "X23",
+    "X24", "X25", "X26", "X27", "X28", "X29", "Xsp", "X31",
+
+    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+};
+#endif /* CONFIG_DEBUG_TCG */
+
+static const int tcg_target_reg_alloc_order[] = { /* allocation preference order; omitted regs are reserved below */
+ /* TCG_REG_X9 qemu saved for AREG0*/
+ TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14,
+
+ TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4,
+ TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8,
+
+ TCG_REG_X22, TCG_REG_X23, TCG_REG_X24, /*TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, */
+
+ /* TCG_REG_FP=TCG_REG_X15 (frame pointer) saved for system */
+ TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, TCG_REG_X28, /* TCG_REG_X29, TCG_REG_X30, TCG_REG_X31 */
+
+ /* TCG_REG_TMP=TCG_REG_X27 reserved as temporary register */
+ /* TCG_REG_TMP2=TCG_REG_X25 reserved as temporary register */
+ /* TCG_REG_RA=TCG_REG_X26 reserved as temporary */
+ /* TCG_REG_GP=TCG_REG_X29 gp saved for system*/
+ /* TCG_REG_SP=TCG_REG_X30 sp saved for system*/
+ /* TCG_REG_ZERO=TCG_REG_X31 zero saved for system*/
+
+ TCG_REG_F2, TCG_REG_F3, TCG_REG_F4, TCG_REG_F5, TCG_REG_F6, TCG_REG_F7, TCG_REG_F8, TCG_REG_F9, /* f2-f9 saved registers */
+ /* TCG_FLOAT_TMP=TCG_REG_F10, TCG_FLOAT_TMP2=TCG_REG_F11 are reserved as temporaries */
+ TCG_REG_F12, TCG_REG_F13, TCG_REG_F14, TCG_REG_F15, /* f12-f15 temporary registers */
+
+ TCG_REG_F22, TCG_REG_F23, TCG_REG_F24, TCG_REG_F25, TCG_REG_F26, TCG_REG_F27, TCG_REG_F28, TCG_REG_F29, TCG_REG_F30, /* f22-f30 temporary registers */
+ /* TCG_REG_F31, zero saved for system */
+
+ TCG_REG_F16, TCG_REG_F17, TCG_REG_F18, TCG_REG_F19, TCG_REG_F20, TCG_REG_F21, /* input args */
+
+ TCG_REG_F0, TCG_REG_F1, /*output args */
+};
+
+static const int tcg_target_call_iarg_regs[6] = { /* SW64 ABI: integer args in $16-$21 */
+ TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21,
+};
+static const int tcg_target_call_oarg_regs[1] = { /* return value in $0 */
+ TCG_REG_X0,
+};
+
+#define TCG_REG_TMP TCG_REG_X27
+#define TCG_REG_TMP2 TCG_REG_X25
+#define TCG_FLOAT_TMP TCG_REG_F10
+#define TCG_FLOAT_TMP2 TCG_REG_F11
+
+#define ALL_GENERAL_REGS 0xffffffffu
+#define ALL_QLDST_REGS ALL_GENERAL_REGS
+#define PUSH_SIZE ((15-9+1+1) * 8) /* fp, ra and callee-saved x9-x14: 8 slots */
+#define FRAME_SIZE \
+ ((PUSH_SIZE \
+ + TCG_STATIC_CALL_ARGS_SIZE \
+ + CPU_TEMP_BUF_NLONGS * sizeof(long) \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & ~(TCG_TARGET_STACK_ALIGN - 1))
+
+/* We encode the format of the insn into the beginning of the name, so that
+ we can have the preprocessor help "typecheck" the insn vs the output
+ function. We don't have nice names for the formats, so we use the section
+ number of the architecture reference manual in which the instruction
+ group is described. */
+#define OPC_OP(x) ((x & 0x3f) << 26) /* primary opcode field, bits [31:26] */
+#define OPC_FUNC(x) ((x & 0xff) << 5) /* function field of simple-format insns */
+#define OPC_FUNC_COMPLEX(x) ((x & 0xff) << 10) /* function field of complex (select) insns */
+typedef enum { /* SW64 instruction encodings: opcode | function bits */
+ OPC_NOP =0X43ff075f,
+ OPC_SYS_CALL =OPC_OP(0x00),
+ OPC_CALL =OPC_OP(0x01),
+ OPC_RET =OPC_OP(0x02),
+ OPC_JMP =OPC_OP(0x03),
+ OPC_BR =OPC_OP(0x04),
+ OPC_BSR =OPC_OP(0x05),
+ OPC_PRI_RET =OPC_OP(0x07),
+ OPC_LDWE =OPC_OP(0x09),
+ OPC_LDSE =OPC_OP(0x0A),
+ OPC_LDDE =OPC_OP(0x0B),
+ OPC_VLDS =OPC_OP(0x0C),
+ OPC_VLDD =OPC_OP(0x0D),
+ OPC_VSTS =OPC_OP(0x0E),
+ OPC_VSTD =OPC_OP(0x0F),
+
+ OPC_LDBU =OPC_OP(0x20),
+ OPC_LDHU =OPC_OP(0x21),
+ OPC_LDW =OPC_OP(0x22),
+ OPC_LDL =OPC_OP(0x23),
+ OPC_LDL_U =OPC_OP(0x24),
+ OPC_FLDS =OPC_OP(0X26), /* NOTE(review): 0x26 listed before 0x25 below; values themselves look consistent */
+ OPC_PRI_LD =OPC_OP(0x25),
+ OPC_FLDD =OPC_OP(0X27),
+ OPC_STB =OPC_OP(0X28),
+ OPC_STH =OPC_OP(0x29),
+ OPC_STW =OPC_OP(0x2a),
+ OPC_STL =OPC_OP(0x2B),
+ OPC_STL_U =OPC_OP(0x2C),
+ OPC_PRI_ST =OPC_OP(0x2D),
+ OPC_FSTS =OPC_OP(0x2E),
+ OPC_FSTD =OPC_OP(0x2F),
+
+ OPC_BEQ =OPC_OP(0x30),
+ OPC_BNE =OPC_OP(0x31),
+ OPC_BLT =OPC_OP(0x32),
+ OPC_BLE =OPC_OP(0x33),
+ OPC_BGT =OPC_OP(0x34),
+ OPC_BGE =OPC_OP(0x35),
+ OPC_BLBC =OPC_OP(0x36),
+ OPC_BLBS =OPC_OP(0x37),
+
+ OPC_FBEQ =OPC_OP(0x38),
+ OPC_FBNE =OPC_OP(0x39),
+ OPC_FBLT =OPC_OP(0x3A),
+ OPC_FBLE =OPC_OP(0x3B),
+ OPC_FBGT =OPC_OP(0x3C),
+ OPC_FBGE =OPC_OP(0x3D),
+ OPC_LDI =OPC_OP(0x3E),
+ OPC_LDIH =OPC_OP(0x3F),
+
+ OPC_ADDW =(OPC_OP(0x10) | OPC_FUNC(0x0)),
+ OPC_ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x0)),
+ OPC_SUBW =(OPC_OP(0x10) | OPC_FUNC(0x1)),
+ OPC_SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x1)),
+ OPC_S4ADDW =(OPC_OP(0x10) | OPC_FUNC(0x02)),
+ OPC_S4ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x02)),
+ OPC_S4SUBW =(OPC_OP(0x10) | OPC_FUNC(0x03)),
+ OPC_S4SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x03)),
+
+ OPC_S8ADDW =(OPC_OP(0x10) | OPC_FUNC(0x04)),
+ OPC_S8ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x04)),
+ OPC_S8SUBW =(OPC_OP(0x10) | OPC_FUNC(0x05)),
+ OPC_S8SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x05)),
+
+ OPC_ADDL =(OPC_OP(0x10) | OPC_FUNC(0x8)),
+ OPC_ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0x8)),
+ OPC_SUBL =(OPC_OP(0x10) | OPC_FUNC(0x9)),
+ OPC_SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0x9)),
+
+ OPC_S4ADDL =(OPC_OP(0x10) | OPC_FUNC(0xA)),
+ OPC_S4ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xA)),
+ OPC_S4SUBL =(OPC_OP(0x10) | OPC_FUNC(0xB)),
+ OPC_S4SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xB)),
+
+ OPC_S8ADDL =(OPC_OP(0x10) | OPC_FUNC(0xC)),
+ OPC_S8ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xC)),
+ OPC_S8SUBL =(OPC_OP(0x10) | OPC_FUNC(0xD)),
+ OPC_S8SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xD)),
+
+ OPC_MULW =(OPC_OP(0x10) | OPC_FUNC(0x10)),
+ OPC_MULW_I =(OPC_OP(0x12) | OPC_FUNC(0x10)),
+ OPC_MULL =(OPC_OP(0x10) | OPC_FUNC(0x18)),
+ OPC_MULL_I =(OPC_OP(0x12) | OPC_FUNC(0x18)),
+
+ OPC_UMULH =(OPC_OP(0x10) | OPC_FUNC(0x19)),
+ OPC_UMULH_I =(OPC_OP(0x12) | OPC_FUNC(0x19)),
+
+ OPC_CTPOP =(OPC_OP(0x10) | OPC_FUNC(0x58)),
+ OPC_CTLZ =(OPC_OP(0x10) | OPC_FUNC(0x59)),
+ OPC_CTTZ =(OPC_OP(0x10) | OPC_FUNC(0x5A)),
+
+ OPC_ZAP =(OPC_OP(0x10) | OPC_FUNC(0x68)),
+ OPC_ZAP_I =(OPC_OP(0x12) | OPC_FUNC(0x68)),
+ OPC_ZAPNOT =(OPC_OP(0x10) | OPC_FUNC(0x69)),
+ OPC_ZAPNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x69)),
+
+ OPC_SEXTB =(OPC_OP(0x10) | OPC_FUNC(0x6A)),
+ OPC_SEXTB_I =(OPC_OP(0x12) | OPC_FUNC(0x6A)),
+ OPC_SEXTH =(OPC_OP(0x10) | OPC_FUNC(0x6B)),
+ OPC_SEXTH_I =(OPC_OP(0x12) | OPC_FUNC(0x6B)),
+
+ OPC_CMPEQ =(OPC_OP(0x10) | OPC_FUNC(0x28)),
+ OPC_CMPEQ_I =(OPC_OP(0x12) | OPC_FUNC(0x28)),
+
+ OPC_CMPLT =(OPC_OP(0x10) | OPC_FUNC(0x29)),
+ OPC_CMPLT_I =(OPC_OP(0x12) | OPC_FUNC(0x29)),
+ OPC_CMPLE =(OPC_OP(0x10) | OPC_FUNC(0x2A)),
+ OPC_CMPLE_I =(OPC_OP(0x12) | OPC_FUNC(0x2A)),
+
+ OPC_CMPULT =(OPC_OP(0x10) | OPC_FUNC(0x2B)),
+ OPC_CMPULT_I =(OPC_OP(0x12) | OPC_FUNC(0x2B)),
+ OPC_CMPULE =(OPC_OP(0x10) | OPC_FUNC(0x2C)),
+ OPC_CMPULE_I =(OPC_OP(0x12) | OPC_FUNC(0x2C)),
+
+ OPC_AND =(OPC_OP(0x10) | OPC_FUNC(0x38)),
+ OPC_BIC =(OPC_OP(0x10) | OPC_FUNC(0x39)),
+ OPC_BIS =(OPC_OP(0x10) | OPC_FUNC(0x3A)),
+ OPC_ORNOT =(OPC_OP(0x10) | OPC_FUNC(0x3B)),
+ OPC_XOR =(OPC_OP(0x10) | OPC_FUNC(0x3C)),
+ OPC_EQV =(OPC_OP(0x10) | OPC_FUNC(0x3D)),
+
+ OPC_AND_I =(OPC_OP(0x12) | OPC_FUNC(0x38)),
+ OPC_BIC_I =(OPC_OP(0x12) | OPC_FUNC(0x39)),
+ OPC_BIS_I =(OPC_OP(0x12) | OPC_FUNC(0x3A)),
+ OPC_ORNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x3B)),
+ OPC_XOR_I =(OPC_OP(0x12) | OPC_FUNC(0x3C)),
+ OPC_EQV_I =(OPC_OP(0x12) | OPC_FUNC(0x3D)),
+
+ OPC_SLL =(OPC_OP(0x10) | OPC_FUNC(0x48)),
+ OPC_SRL =(OPC_OP(0x10) | OPC_FUNC(0x49)),
+ OPC_SRA =(OPC_OP(0x10) | OPC_FUNC(0x4A)),
+ OPC_SLL_I =(OPC_OP(0x12) | OPC_FUNC(0x48)),
+ OPC_SRL_I =(OPC_OP(0x12) | OPC_FUNC(0x49)),
+ OPC_SRA_I =(OPC_OP(0x12) | OPC_FUNC(0x4A)),
+
+ OPC_SELEQ =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x00)),
+ OPC_SELGE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x01)),
+ OPC_SELGT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x02)),
+ OPC_SELLE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x03)),
+ OPC_SELLT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x04)),
+ OPC_SELNE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x05)),
+ OPC_SELLBC =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x06)),
+ OPC_SELLBS =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x07)),
+ OPC_SELEQ_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x00)),
+ OPC_SELGE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x01)),
+ OPC_SELGT_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x02)),
+ OPC_SELLE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x03)),
+ OPC_SELLT_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x04)),
+ OPC_SELNE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x05)),
+ OPC_SELLBC_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x06)),
+ OPC_SELLBS_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x07)),
+
+ OPC_INS0B =(OPC_OP(0x10) | OPC_FUNC(0x40)),
+ OPC_INS1B =(OPC_OP(0x10) | OPC_FUNC(0x41)),
+ OPC_INS2B =(OPC_OP(0x10) | OPC_FUNC(0x42)),
+ OPC_INS3B =(OPC_OP(0x10) | OPC_FUNC(0x43)),
+ OPC_INS4B =(OPC_OP(0x10) | OPC_FUNC(0x44)),
+ OPC_INS5B =(OPC_OP(0x10) | OPC_FUNC(0x45)),
+ OPC_INS6B =(OPC_OP(0x10) | OPC_FUNC(0x46)),
+ OPC_INS7B =(OPC_OP(0x10) | OPC_FUNC(0x47)),
+ OPC_INS0B_I =(OPC_OP(0x12) | OPC_FUNC(0x40)),
+ OPC_INS1B_I =(OPC_OP(0x12) | OPC_FUNC(0x41)),
+ OPC_INS2B_I =(OPC_OP(0x12) | OPC_FUNC(0x42)),
+ OPC_INS3B_I =(OPC_OP(0x12) | OPC_FUNC(0x43)),
+ OPC_INS4B_I =(OPC_OP(0x12) | OPC_FUNC(0x44)),
+ OPC_INS5B_I =(OPC_OP(0x12) | OPC_FUNC(0x45)),
+ OPC_INS6B_I =(OPC_OP(0x12) | OPC_FUNC(0x46)),
+ OPC_INS7B_I =(OPC_OP(0x12) | OPC_FUNC(0x47)),
+
+ OPC_EXT0B =(OPC_OP(0x10) | OPC_FUNC(0x50)),
+ OPC_EXT1B =(OPC_OP(0x10) | OPC_FUNC(0x51)),
+ OPC_EXT2B =(OPC_OP(0x10) | OPC_FUNC(0x52)),
+ OPC_EXT3B =(OPC_OP(0x10) | OPC_FUNC(0x53)),
+ OPC_EXT4B =(OPC_OP(0x10) | OPC_FUNC(0x54)),
+ OPC_EXT5B =(OPC_OP(0x10) | OPC_FUNC(0x55)),
+ OPC_EXT6B =(OPC_OP(0x10) | OPC_FUNC(0x56)),
+ OPC_EXT7B =(OPC_OP(0x10) | OPC_FUNC(0x57)),
+ OPC_EXT0B_I =(OPC_OP(0x12) | OPC_FUNC(0x50)),
+ OPC_EXT1B_I =(OPC_OP(0x12) | OPC_FUNC(0x51)),
+ OPC_EXT2B_I =(OPC_OP(0x12) | OPC_FUNC(0x52)),
+ OPC_EXT3B_I =(OPC_OP(0x12) | OPC_FUNC(0x53)),
+ OPC_EXT4B_I =(OPC_OP(0x12) | OPC_FUNC(0x54)),
+ OPC_EXT5B_I =(OPC_OP(0x12) | OPC_FUNC(0x55)),
+ OPC_EXT6B_I =(OPC_OP(0x12) | OPC_FUNC(0x56)),
+ OPC_EXT7B_I =(OPC_OP(0x12) | OPC_FUNC(0x57)),
+
+ OPC_MASK0B =(OPC_OP(0x10) | OPC_FUNC(0x60)),
+ OPC_MASK1B =(OPC_OP(0x10) | OPC_FUNC(0x61)),
+ OPC_MASK2B =(OPC_OP(0x10) | OPC_FUNC(0x62)),
+ OPC_MASK3B =(OPC_OP(0x10) | OPC_FUNC(0x63)),
+ OPC_MASK4B =(OPC_OP(0x10) | OPC_FUNC(0x64)),
+ OPC_MASK5B =(OPC_OP(0x10) | OPC_FUNC(0x65)),
+ OPC_MASK6B =(OPC_OP(0x10) | OPC_FUNC(0x66)),
+ OPC_MASK7B =(OPC_OP(0x10) | OPC_FUNC(0x67)),
+ OPC_MASK0B_I =(OPC_OP(0x12) | OPC_FUNC(0x60)),
+ OPC_MASK1B_I =(OPC_OP(0x12) | OPC_FUNC(0x61)),
+ OPC_MASK2B_I =(OPC_OP(0x12) | OPC_FUNC(0x62)),
+ OPC_MASK3B_I =(OPC_OP(0x12) | OPC_FUNC(0x63)),
+ OPC_MASK4B_I =(OPC_OP(0x12) | OPC_FUNC(0x64)),
+ OPC_MASK5B_I =(OPC_OP(0x12) | OPC_FUNC(0x65)),
+ OPC_MASK6B_I =(OPC_OP(0x12) | OPC_FUNC(0x66)),
+ OPC_MASK7B_I =(OPC_OP(0x12) | OPC_FUNC(0x67)),
+
+ OPC_CNPGEB =(OPC_OP(0x10) | OPC_FUNC(0x6C)),
+ OPC_CNPGEB_I =(OPC_OP(0x12) | OPC_FUNC(0x6C)),
+
+ OPC_MEMB =(OPC_OP(0x06) | OPC_FUNC(0x0)),
+ OPC_RTC =(OPC_OP(0x06) | OPC_FUNC(0x20)),
+
+ /*float insn*/
+ OPC_RFPCR = (OPC_OP(0x18) | OPC_FUNC(0x50)),
+ OPC_WFPCR = (OPC_OP(0x18) | OPC_FUNC(0x51)),
+ OPC_SETFPEC0 = (OPC_OP(0x18) | OPC_FUNC(0x54)),
+ OPC_SETFPEC1 = (OPC_OP(0x18) | OPC_FUNC(0x55)),
+ OPC_SETFPEC2 = (OPC_OP(0x18) | OPC_FUNC(0x56)),
+ OPC_SETFPEC3 = (OPC_OP(0x18) | OPC_FUNC(0x57)),
+
+
+ OPC_IFMOVS = (OPC_OP(0x18) | OPC_FUNC(0x40)),
+ OPC_IFMOVD = (OPC_OP(0x18) | OPC_FUNC(0x41)),
+ OPC_FIMOVS = (OPC_OP(0x10) | OPC_FUNC(0x70)),
+ OPC_FIMOVD = (OPC_OP(0x10) | OPC_FUNC(0x78)),
+
+ /*translate S--D*/
+ /*translate S/D--Long*/
+ OPC_FCVTSD = (OPC_OP(0x18) | OPC_FUNC(0x20)),
+ OPC_FCVTDS = (OPC_OP(0x18) | OPC_FUNC(0x21)),
+ OPC_FCVTDL_G = (OPC_OP(0x18) | OPC_FUNC(0x22)),
+ OPC_FCVTDL_P = (OPC_OP(0x18) | OPC_FUNC(0x23)),
+ OPC_FCVTDL_Z = (OPC_OP(0x18) | OPC_FUNC(0x24)),
+ OPC_FCVTDL_N = (OPC_OP(0x18) | OPC_FUNC(0x25)),
+ OPC_FCVTDL = (OPC_OP(0x18) | OPC_FUNC(0x27)),
+ OPC_FCVTLS = (OPC_OP(0x18) | OPC_FUNC(0x2D)),
+ OPC_FCVTLD = (OPC_OP(0x18) | OPC_FUNC(0x2F)),
+
+
+ OPC_FADDS = (OPC_OP(0x18) | OPC_FUNC(0x00)),
+ OPC_FADDD = (OPC_OP(0x18) | OPC_FUNC(0x01)),
+ OPC_FSUBS = (OPC_OP(0x18) | OPC_FUNC(0x02)),
+ OPC_FSUBD = (OPC_OP(0x18) | OPC_FUNC(0x03)),
+ OPC_FMULS = (OPC_OP(0x18) | OPC_FUNC(0x04)),
+ OPC_FMULD = (OPC_OP(0x18) | OPC_FUNC(0x05)),
+ OPC_FDIVS = (OPC_OP(0x18) | OPC_FUNC(0x06)),
+ OPC_FDIVD = (OPC_OP(0x18) | OPC_FUNC(0x07)),
+ OPC_FSQRTS = (OPC_OP(0x18) | OPC_FUNC(0x08)),
+ OPC_FSQRTD = (OPC_OP(0x18) | OPC_FUNC(0x09)),
+}SW_64Insn;
+
+static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64);
+static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16);
+static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, TCGReg rm);
+static void tcg_out_insn_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64);
+static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, intptr_t imm64);
+static void tcg_out_insn_bitImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64);
+static void tcg_out_insn_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64);
+static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm);
+static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,TCGReg a1,TCGReg a2, bool const_b, TCGReg v1, TCGReg v2);
+static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target);
+static inline uint32_t tcg_in32(TCGContext *s);
+static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn);
+static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign);
+static void tcg_out_cond_cmp(TCGContext *s, TCGCond cond, TCGReg ret, TCGArg a, TCGArg b, bool const_b);
+static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t aimm);
+static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, unsigned int m);
+static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm);
+static inline void tcg_out_rotr_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm);
+static inline void tcg_out_rotl_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m);
+static inline void tcg_out_rotr_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m);
+static void tcg_out_cltz(TCGContext *s, SW_64Insn opc_clz, TCGType ext, TCGReg rd, TCGReg rn, TCGArg b, bool const_b);
+static inline void tcg_out_bswap16u(TCGContext *s, TCGReg rd, TCGReg rn);
+static inline void tcg_out_bswap16s(TCGContext *s, TCGReg rd, TCGReg rn);
+static inline void tcg_out_bswap32u(TCGContext *s, TCGReg rd, TCGReg rn);
+static inline void tcg_out_bswap32s(TCGContext *s, TCGReg rd, TCGReg rn);
+static inline void tcg_out_bswap64(TCGContext *s, TCGReg rd, TCGReg rn);
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi);
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi, TCGType ext);
+static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg arg1, TCGReg arg2);
+static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int pos, int len);
+static void tcg_out_dep(TCGContext *s, TCGReg rd, TCGReg rn, int pos, int len);
+static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm);
+
+#define tcg_out_insn_jump tcg_out_insn_ldst /* jump insns reuse the memory-format encoding */
+#define tcg_out_insn_bitReg tcg_out_insn_simpleReg /* register bit ops share the simple register format */
+
+static void tcg_target_init(TCGContext *s) /* declare usable, clobbered and reserved registers to TCG */
+{
+ tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
+ tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
+ tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
+ tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
+ tcg_target_call_clobber_regs = -1ull;
+
+ //sw_64 callee saved x9-x15
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X9);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X10);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X11);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X12);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X13);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X14);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X15);
+
+ //sw_64 callee saved f2~f9
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F2);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F3);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F4);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F5);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F6);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F7);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F8);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F9);
+
+ s->reserved_regs = 0;
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); //TCG_REG_X27
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); //TCG_REG_X25
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); //TCG_REG_X26
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_X29); /* gp, saved for system */
+ tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP); /* scratch float register, reserved as temporary */
+ tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP2); /* scratch float register, reserved as temporary */
+}
+
+
+#ifndef CONFIG_SOFTMMU
+ #define USE_GUEST_BASE guest_base != 0 /* NOTE(review): expansion unparenthesized; safe only in a bare boolean context */
+ #define TCG_REG_GUEST_BASE TCG_REG_X14
+#endif
+
+
+#define zeroExt 0 /* values for the "sign" argument of tcg_out_ldst() */
+#define sigExt 1
+
+
+static void tcg_target_qemu_prologue(TCGContext *s) /* emit the translation-block entry/exit trampoline */
+{
+ TCGReg r;
+ int ofs;
+
+ /* allocate space for all saved registers */
+ /* subl $sp,PUSH_SIZE,$sp */
+ tcg_out_insn_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE);
+
+ /* Push (FP, LR) */
+ /* stl $fp,0($sp) */
+ tcg_out_insn_ldst(s, OPC_STL, TCG_REG_FP, TCG_REG_SP, 0);
+ /* stl $26,8($sp) */
+ tcg_out_insn_ldst(s, OPC_STL, TCG_REG_RA, TCG_REG_SP, 8);
+
+
+ /* Set up frame pointer for canonical unwinding. */
+ /* TCG_REG_FP=TCG_REG_SP */
+ tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);
+
+ /* Store callee-preserved regs x9..x14 at slots 2..7 above fp/ra. */
+ for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1){
+ ofs = (r - TCG_REG_X9 + 2) * 8;
+ tcg_out_insn_ldst(s, OPC_STL, r, TCG_REG_SP, ofs);
+ }
+
+ /* Make stack space for TCG locals. */
+ /* subl $sp,FRAME_SIZE-PUSH_SIZE,$sp */
+ tcg_out_insn_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE);
+
+ /* Inform TCG about how to find TCG locals with register, offset, size. */
+ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
+ CPU_TEMP_BUF_NLONGS * sizeof(long));
+
+#if !defined(CONFIG_SOFTMMU)
+ if (USE_GUEST_BASE) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
+ }
+#endif
+
+ /* TCG_AREG0=tcg_target_call_iarg_regs[0], on sw, we mov $16 to $9 */
+ tcg_out_mov(s, TCG_TYPE_I64, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); /* jump to the TB passed in arg 1 */
+
+ /*
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+ * and fall through to the rest of the epilogue.
+ */
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, 0);
+
+ /* TB epilogue */
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
+
+ /* Remove TCG locals stack space. */
+ /* addl $sp,FRAME_SIZE-PUSH_SIZE,$sp */
+ tcg_out_insn_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE);
+
+ /* Restore registers x9..x14. */
+ for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1) {
+ int ofs = (r - TCG_REG_X9 + 2) * 8;
+ tcg_out_insn_ldst(s, OPC_LDL, r, TCG_REG_SP, ofs);
+ }
+
+
+ /* Pop (FP, LR) */
+ /* ldl $fp,0($sp) */
+ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_FP, TCG_REG_SP, 0);
+ /* ldl $26,8($sp) */
+ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_RA, TCG_REG_SP, 8);
+
+ /* restore SP to previous frame. */
+ /* addl $sp,PUSH_SIZE,$sp */
+ tcg_out_insn_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE);
+
+ tcg_out_insn_jump(s, OPC_RET, TCG_REG_ZERO, TCG_REG_RA, 0);
+}
+
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) /* register-to-register move; regs < 32 are GPRs */
+{
+ if (ret == arg) {
+ return true;
+ }
+ switch (type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ if (ret < 32 && arg < 32) {
+ tcg_out_movr(s, type, ret, arg);
+ break;
+ } else if (ret < 32) {
+ break; /* NOTE(review): FPR->GPR move emits no instruction -- confirm this path is unreachable */
+ } else if (arg < 32) {
+ break; /* NOTE(review): GPR->FPR move emits no instruction -- confirm this path is unreachable */
+ }
+ /* FALLTHRU */
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) /* map each TCG opcode to its operand constraint set */
+{
+ switch (op) {
+ case INDEX_op_goto_ptr:
+ return C_O0_I1(r);
+
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_sextract_i32:
+ case INDEX_op_sextract_i64:
+ return C_O1_I1(r, r);
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i64:
+ return C_O0_I2(rZ, r);
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ return C_O1_I2(r, r, rU);//rA
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ return C_O1_I2(r, r, rU);//compare,rA
+
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ case INDEX_op_muluh_i64:
+ case INDEX_op_mulsh_i64:
+ return C_O1_I2(r, r, r);
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ case INDEX_op_eqv_i32:
+ case INDEX_op_eqv_i64:
+ return C_O1_I2(r, r, rU);//rL
+
+ case INDEX_op_shl_i32:
+ case INDEX_op_shr_i32:
+ case INDEX_op_sar_i32:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i64:
+ return C_O1_I2(r, r, ri);
+
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ return C_O1_I2(r, r, r); //rAL
+
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ return C_O1_I2(r, r, r);//rAL
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return C_O0_I2(r, rU);//rA
+
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ return C_O1_I4(r, r, rU, rZ, rZ);//rA->rU
+
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ return C_O1_I1(r, l);
+
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
+ return C_O0_I2(lZ, l);
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ return C_O1_I2(r, 0, rZ);
+
+ case INDEX_op_extract2_i32:
+ case INDEX_op_extract2_i64:
+ return C_O1_I2(r, rZ, rZ);
+
+ case INDEX_op_add2_i32:
+ case INDEX_op_add2_i64:
+ case INDEX_op_sub2_i32:
+ case INDEX_op_sub2_i64:
+ return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
+
+ case INDEX_op_add_vec:
+ case INDEX_op_sub_vec:
+ case INDEX_op_mul_vec:
+ case INDEX_op_xor_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_ussub_vec:
+ case INDEX_op_smax_vec:
+ case INDEX_op_smin_vec:
+ case INDEX_op_umax_vec:
+ case INDEX_op_umin_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
+ return C_O1_I2(w, w, w);
+ case INDEX_op_not_vec:
+ case INDEX_op_neg_vec:
+ case INDEX_op_abs_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_sari_vec:
+ return C_O1_I1(w, w);
+ case INDEX_op_ld_vec:
+ case INDEX_op_dupm_vec:
+ return C_O1_I1(w, r);
+ case INDEX_op_st_vec:
+ return C_O0_I2(w, r);
+ case INDEX_op_dup_vec:
+ return C_O1_I1(w, wr);
+ case INDEX_op_or_vec:
+ case INDEX_op_andc_vec:
+ return C_O1_I2(w, w, wO);
+ case INDEX_op_and_vec:
+ case INDEX_op_orc_vec:
+ return C_O1_I2(w, w, wN);
+ case INDEX_op_cmp_vec:
+ return C_O1_I2(w, w, wZ);
+ case INDEX_op_bitsel_vec:
+ return C_O1_I3(w, w, w, w);
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count) /* pad the code buffer with SW64 NOPs */
+{
+ int i;
+ for (i = 0; i < count; ++i) {
+ p[i] = OPC_NOP;
+ }
+}
+
+/* SW instruction format of syscall
+ * insn = opcode[31,26]:Function[25,0],
+ */
+
+/* SW instruction format of br(alias jump)
+ * insn = opcode[31,26]:Rd[25,21]:disp[20,0],
+ */
+static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64)
+{
+ tcg_debug_assert(imm64 <= 0xfffff && imm64 >= -0x100000); /* signed 21-bit displacement */
+ tcg_out32(s, insn | (rd & 0x1f) << 21 | (imm64 & 0x1fffff));
+}
+
+
+/* SW instruction format of (load and store)
+ * insn = opcode[31,26]:rd[25,21]:rn[20,16]:disp[15,0]
+ */
+static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16)
+{
+ tcg_debug_assert(imm16 <= 0x7fff && imm16 >= -0x8000); /* signed 16-bit displacement */
+ tcg_out32(s, insn | (rd & 0x1f) << 21 | (rn & 0x1f) << 16 | (imm16 & 0xffff));
+}
+
+
+/* SW instruction format of simple operator for Register
+ * insn = opcode[31,26]:rn(ra)[25,21]:rn(rb)[20,16]:Zeors[15,13]:function[12,5]:rd(rc)[4,0]
+ */
+static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, TCGReg rm) /* rd = rn OP rm */
+{
+ tcg_out32(s, insn | (rn & 0x1f) << 21 | (rm & 0x1f) << 16 | (rd & 0x1f));
+}
+
+/* SW instruction format of simple operator for imm
+ * insn = opcode[31,26]:rn(ra)[25,21]:disp[20,13]:function[12,5]:rd(rc)[4,0]
+ */
+static void tcg_out_insn_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64) /* use imm form when it fits in 8 bits, else materialise in TMP */
+{
+ if(imm64 <= 0x7f && imm64 >= -0x80) {
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
+ }
+ else {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
+ tcg_out_insn_simpleReg(s, insn_Reg, rd, rn, TCG_REG_TMP);
+ }
+}
+
+
+static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, intptr_t imm64) /* rd = rn OP imm; imm must fit in 8 bits */
+{
+ tcg_debug_assert(imm64 <= 0x7f && imm64 >= -0x80);
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
+
+}
+
+static void tcg_out_insn_bitImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64) /* bit op with unsigned 8-bit immediate */
+{
+ tcg_debug_assert(imm64 <= 255);
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
+}
+/* sw bit operation: and bis etc */
+static void tcg_out_insn_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64) /* use imm form when imm fits in 8 bits, else materialise in TMP */
+{
+ if (imm64 <= 255) {
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
+ }
+ else {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
+ tcg_out_insn_bitReg(s, insn_Reg, rd, rn, TCG_REG_TMP);
+ }
+}
+
+/* SW instruction format of complex operator
+ * insn = opcode[31,26]:rd[25,21]:rn[20,16],function[15,10]:rm[9,5]:rx[4,0]
+ */
+static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm) /* conditional select: rd = cond ? rn : rm */
+{
+ tcg_out32(s, insn | (cond & 0x1f) << 21 | (rn & 0x1f) << 16 | (rm & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target) /* patch a branch's 21-bit pc-relative displacement; false if out of range */
+{
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ ptrdiff_t offset = target - (src_rx + 1) ; /* relative to the next insn, in insn units */
+
+ if (offset == sextract64(offset, 0, 21)) {
+ /* read instruction, mask away previous PC_REL21 parameter contents,
+ set the proper offset, then write back the instruction. */
+ *src_rw = deposit32(*src_rw, 0, 21, offset);
+ return true;
+ }
+ return false;
+}
+
+/* Apply a relocation of the given TYPE to a previously-emitted instruction. */
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend)
+{
+ tcg_debug_assert(addend == 0);
+ switch (type) {
+ case R_SW_64_BRADDR:
+ return reloc_pc21(code_ptr, (const tcg_insn_unit *)value);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline uint32_t tcg_in32(TCGContext *s) /* read the 32-bit insn at the current code pointer */
+{
+ uint32_t v = *(uint32_t *)s->code_ptr;
+ return v;
+}
+
+/* SW register-to-register move using BIS; for I32 the result is zero-extended with ZAPNOT. */
+static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
+{
+ tcg_out_insn_simpleReg(s, OPC_BIS, rd, rn, TCG_REG_ZERO);
+ if (ext == TCG_TYPE_I32){
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); /* keep low 4 bytes, clear the rest */
+ }
+}
+
+/* sw:
+ * load the immediate ORIG into RD via LDIH/LDI sequences (with SLL for the high half).
+ */
+static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long orig)
+{
+ long l0, l1, l2=0, l3=0, extra=0; /* 16-bit chunks of the constant, low to high */
+ tcg_target_long val = orig;
+ TCGReg rs = TCG_REG_ZERO;
+
+ if (type == TCG_TYPE_I32)
+ val = (int32_t)val;
+
+ l0 = (int16_t)val;
+ val = (val - l0) >> 16;
+ l1 = (int16_t)val;
+
+ if (orig >> 31 == -1 || orig >> 31 == 0) { /* value fits in a sign-extended 32 bits */
+ if (l1 < 0 && orig >= 0) {
+ extra = 0x4000; /* extra LDIH compensates for l1's sign extension */
+ l1 = (int16_t)(val - 0x4000);
+ }
+ } else {
+ val = (val - l1) >> 16;
+ l2 = (int16_t)val;
+ val = (val - l2) >> 16;
+ l3 = (int16_t)val;
+
+ if (l3) {
+ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l3);
+ rs = rd;
+ }
+ if (l2) {
+ tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l2);
+ rs = rd;
+ }
+ if (l3 || l2)
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, rd, rd, 32); /* shift the built high half into place */
+ }
+
+ if (l1) {
+ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l1);
+ rs = rd;
+ }
+
+ if (extra) {
+ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, extra);
+ rs = rd;
+ }
+
+ tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l0);
+}
+
+
+/* sw:
+ * memory <=> register transfer of 1/2/4/8 bytes; "sign" selects sign- vs zero-extension.
+ */
+static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign)
+{
+ int16_t lo = offset;
+ if (offset != lo) { /* offset does not fit the 16-bit field: build base+high in TMP */
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset - lo);
+ if (rn != TCG_REG_ZERO) {
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, TCG_REG_TMP, rn);
+ }
+ tcg_out_insn_ldst(s, insn, rd, TCG_REG_TMP, lo);
+ }
+ else {
+ tcg_out_insn_ldst(s, insn, rd, rn, lo);
+ }
+
+ switch (insn) {
+ case OPC_LDBU:
+ if (sign)
+ tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rd); //for micro-op:INDEX_op_ld8s_i32/64,set rd[63,8]=1
+ break;
+ case OPC_LDHU:
+ if (sign)
+ tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rd); //for micro-op:INDEX_op_ld16s_i32/64,set rd[63,16]=1
+ break;
+ case OPC_LDW:
+ if (!sign)
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); //for micro-op:INDEX_op_ld32u_i32/64,set rd[63,32]=0
+ break;
+ default:
+ break;
+ }
+}
+
+/* Emit a comparison, leaving the boolean result in RET; inverse conditions (NE/GE/GT/GEU/GTU) are formed by XOR-ing the result with 1. */
+static void tcg_out_cond_cmp(TCGContext *s, TCGCond cond, TCGReg ret, TCGArg a, TCGArg b, bool const_b)
+{
+ if (const_b) { /* b is an immediate */
+ switch(cond) {
+ case TCG_COND_ALWAYS:
+ case TCG_COND_NEVER:
+ break;
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ tcg_out_insn_simple(s, OPC_CMPEQ_I, OPC_CMPEQ, ret, a, b);
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ tcg_out_insn_simple(s, OPC_CMPLT_I, OPC_CMPLT, ret, a, b);
+ break;
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ tcg_out_insn_simple(s, OPC_CMPLE_I, OPC_CMPLE, ret, a, b);
+ break;
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ tcg_out_insn_simple(s, OPC_CMPULT_I, OPC_CMPULT, ret, a, b);
+ break;
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ tcg_out_insn_simple(s, OPC_CMPULE_I, OPC_CMPULE, ret, a, b);
+ break;
+ }//cond
+ }//if (const_b)
+ else { /* b is a register */
+ switch(cond) {
+ case TCG_COND_ALWAYS:
+ case TCG_COND_NEVER:
+ break;
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ tcg_out_insn_simpleReg(s, OPC_CMPEQ, ret, a, b);
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ tcg_out_insn_simpleReg(s, OPC_CMPLT, ret, a, b);
+ break;
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ tcg_out_insn_simpleReg(s, OPC_CMPLE, ret, a, b);
+ break;
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ tcg_out_insn_simpleReg(s, OPC_CMPULT, ret, a, b);
+ break;
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ tcg_out_insn_simpleReg(s, OPC_CMPULE, ret, a, b);
+ break;
+ }//cond
+ }//else
+ switch(cond) { /* invert the result for the negated conditions */
+ case TCG_COND_ALWAYS:
+ case TCG_COND_NEVER:
+ case TCG_COND_EQ:
+ case TCG_COND_LT:
+ case TCG_COND_LE:
+ case TCG_COND_LTU:
+ case TCG_COND_LEU:
+ break;
+ case TCG_COND_NE:
+ case TCG_COND_GE:
+ case TCG_COND_GT:
+ case TCG_COND_GEU:
+ case TCG_COND_GTU:
+ tcg_out_insn_bitImm(s, OPC_XOR_I, ret, ret, 0x1);
+ break;
+ }
+}
+
+/* sw
+ * step1: tcg_out_cond_cmp() handles each cond/inverse pair (e.g. eq/ne)
+ *        with the same compare insn, leaving a 0/1 result in TCG_REG_TMP;
+ * step2: branch on that result.  The eq/ne-against-zero fast path skips the
+ *        compare and uses BEQ/BNE on the register directly. */
+static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond cond, TCGArg a, TCGArg b, bool b_const, TCGLabel *l)
+{
+    intptr_t offset;
+    bool need_cmp;
+
+    if (b_const && b == 0 && (cond == TCG_COND_EQ || cond == TCG_COND_NE)) {
+        need_cmp = false;
+    } else {
+        need_cmp = true;
+        tcg_out_cond_cmp(s, cond, TCG_REG_TMP, a, b, b_const);
+    }
+
+    if (!l->has_value) {
+        tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0);
+        offset=0; //offset patched later by the reloc; "br $31, 0" does not jump here!
+    } else {
+        offset = tcg_pcrel_diff(s, l->u.value_ptr) ;
+        offset = offset >> 2; //branch displacement counts 4-byte insn units
+        tcg_debug_assert(offset == sextract64(offset, 0, 21));
+    }
+
+    if (need_cmp) {
+        tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, offset); //TMP==1 when cond holds, so BGT taken
+    } else if (cond == TCG_COND_EQ) {
+        tcg_out_insn_br(s, OPC_BEQ, a, offset);
+    } else {
+        tcg_out_insn_br(s, OPC_BNE, a, offset);
+    }
+}
+
+/* sw
+ * Constraint flags; must stay in sync with "tcg-target-con-str.h".
+ */
+#define TCG_CT_CONST_ZERO 0x100
+#define TCG_CT_CONST_LONG 0x200
+#define TCG_CT_CONST_MONE 0x400
+#define TCG_CT_CONST_ORRI 0x800
+#define TCG_CT_CONST_WORD 0X1000
+#define TCG_CT_CONST_U8 0x2000
+#define TCG_CT_CONST_S8 0X4000
+
+#define ALL_GENERAL_REGS 0xffffffffu
+#define ALL_VECTOR_REGS 0xffffffff00000000ull
+
+
+#ifdef CONFIG_SOFTMMU
+/* NOTE(review): ALL_QLDST_REGS is left undefined for softmmu builds -- confirm softmmu is intentionally unsupported */
+#else
+    #define ALL_QLDST_REGS ALL_GENERAL_REGS
+#endif
+
+/* sw: test whether constant 'val' satisfies operand constraint set 'ct' */
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+{
+    if (ct & TCG_CT_CONST) {
+        return 1;
+    }
+    if (type == TCG_TYPE_I32) {
+        val = (int32_t)val;
+    }
+    if ((ct & TCG_CT_CONST_U8) && 0 <= val && val <= 255) {
+        return 1;
+    }
+    if ((ct & TCG_CT_CONST_LONG)) { /* accepts any value; materialized via movi */
+        return 1;
+    }
+    if ((ct & TCG_CT_CONST_MONE)) { /* accepts any value */
+        return 1;
+    }
+    if ((ct & TCG_CT_CONST_ORRI)) { /* accepts any value */
+        return 1;
+    }
+    if ((ct & TCG_CT_CONST_WORD)) { /* accepts any value */
+        return 1;
+    }
+    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+        return 1;
+    }
+    return 0;
+}
+
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs) /* load 32/64-bit value at rn+ofs, sign-extending 32-bit loads */
+{
+    switch (type) {
+    case TCG_TYPE_I32:
+        tcg_out_ldst(s, OPC_LDW, rd, rn, ofs, sigExt);
+        break;
+    case TCG_TYPE_I64:
+        tcg_out_ldst(s, OPC_LDL, rd, rn, ofs, sigExt);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs) /* store 32/64-bit register rd to rn+ofs */
+{
+    switch (type) {
+    case TCG_TYPE_I32:
+        tcg_out_insn_ldst(s, OPC_STW, rd, rn, ofs);
+        break;
+    case TCG_TYPE_I64:
+        tcg_out_insn_ldst(s, OPC_STL, rd, rn, ofs);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) /* store immediate; only val==0 is handled (via $zero) */
+{
+    if (type <= TCG_TYPE_I64 && val == 0) {
+        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
+        return true;
+    }
+    return false; /* caller must materialize non-zero immediates itself */
+}
+
+static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t imm64) /* rd = rn + imm64, via ADDL/SUBL with 8-bit imm or TCG_REG_TMP */
+{
+    if (imm64 >= 0) {
+        if(0 <=imm64 && imm64 <= 255) {
+            /* we use tcg_out_insn_bitImm because imm64 is between 0~255 */
+            tcg_out_insn_bitImm(s, OPC_ADDL_I, rd, rn, imm64);
+        }//aimm>0 && aimm == sextract64(aim, 0, 8)
+        else {
+            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
+            tcg_out_insn_simpleReg(s, OPC_ADDL, rd, rn, TCG_REG_TMP);
+        }//aimm>0 && aimm != sextract64(aim, 0, 8)
+    } else { /* NOTE(review): imm64 == INT64_MIN makes -imm64 signed-overflow UB -- confirm unreachable */
+        if(0 < -imm64 && -imm64 <= 255) {
+            /* we use tcg_out_insn_bitImm because -imm64 is between 0~255 */
+            tcg_out_insn_bitImm(s, OPC_SUBL_I, rd, rn, -imm64);
+        }//aimm<0 && aimm == sextract64(aim, 0, 8)
+        else {
+            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, -imm64);
+            tcg_out_insn_simpleReg(s, OPC_SUBL, rd, rn, TCG_REG_TMP);
+        }//aimm<0 && aimm != sextract64(aim, 0, 8)
+    }
+}
+
+static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) /* PC-relative BR; target must be within the signed 21-bit range */
+{
+    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
+    tcg_debug_assert(offset == sextract64(offset, 0, 21));
+    tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset);
+}
+
+static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target) /* BR when forward and in range, else absolute jump through TCG_REG_TMP */
+{
+    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
+    if (0 <= offset && offset <= 0x1fffff) { /* note: backward branches always take the indirect path below */
+        tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
+        tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, 0);
+    }
+}
+
+
+/* sw
+ * Call subroutine: BSR when in signed 21-bit range, else indirect CALL
+ * through TCG_REG_TMP; return address goes to TCG_REG_RA either way. */
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
+{
+    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
+    if (offset == sextract64(offset, 0, 21)) {
+        tcg_out_insn_br(s, OPC_BSR, TCG_REG_RA, offset);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
+        tcg_out_insn_jump(s, OPC_CALL, TCG_REG_RA, TCG_REG_TMP, 0);
+    }
+}
+
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, uintptr_t jmp_rw, uintptr_t addr)
+{
+    tcg_debug_assert(0);
+    /* direct jumps are disabled (TCG_TARGET_HAS_direct_jump == 0), so this must never be reached */
+}
+
+static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) /* branch to label; unresolved labels get a reloc-patched BR */
+{
+    if (!l->has_value) {
+        tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0);
+        tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, 0); /* displacement filled in when the label is resolved */
+    } else {
+        tcg_out_goto(s, l->u.value_ptr);
+    }
+}
+
+/* sw
+ * result: rd = rn(64,64-m] : rm(64-m,0]
+ * 1: rn(m,0]    ---> TCG_REG_TMP(64,64-m]
+ * 2: rm(64,64-m]---> TCG_REG_TMP2(64-m,0]
+ * 3: rd = TCG_REG_TMP(64,64-m] : TCG_REG_TMP2(64-m,0]
+ */
+static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, unsigned int m)
+{
+    int bits = ext ? 64 : 32;
+    int max = bits - 1;
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, (m & max));
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+}
+
+/* sw
+ * Rotate right by immediate m: rd = (rn >> m) | (rn << (bits - m)).
+ */
+static inline void tcg_out_rotr_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m)
+{
+    int bits = ext ? 64 : 32;
+    int max = bits - 1;
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, (m & max));
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+}
+
+/* sw: rotate right by register amount rm
+ */
+static inline void tcg_out_rotr_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    int bits = ext ? 64 : 32;
+    //TCG_REG_TMP = bits - rm  (computed as ~(rm - bits) ... i.e. two's complement trick)
+    tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits);
+    tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP);
+
+    tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rn, TCG_REG_TMP); //low part of rn moved to the top
+    tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rn, rm);           //high part of rn moved down
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+}
+
+/* sw
+ * Rotate left by immediate m: rd = (rn << m) | (rn >> (bits - m)).
+ */
+static inline void tcg_out_rotl_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m)
+{
+    int bits = ext ? 64 : 32;
+    int max = bits - 1;
+
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP, rn, bits -(m & max));
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, (m & max)); //high part: rn shifted left
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); //combine with wrapped-around low part
+}
+
+
+/* sw: rotate left by register amount rm
+ */
+static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    int bits = ext ? 64 : 32;
+    tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits); //TMP = rm - bits
+    tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP); //TMP = ~(rm - bits)
+
+    tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rn, TCG_REG_TMP); //high part of rn wrapped to the bottom
+    tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rn, rm);           //low part of rn shifted up
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+}
+
+
+
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS], const int const_args[TCG_MAX_OP_ARGS]) /* central dispatcher: lower one TCG opcode to SW64 code */
+{
+    /* 99% of the time, we can signal the use of extension registers
+       by looking to see if the opcode handles 64-bit data. */
+    TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;
+    /* Hoist the loads of the most common arguments. */
+    TCGArg a0 = args[0];
+    TCGArg a1 = args[1];
+    TCGArg a2 = args[2];
+    int c2 = const_args[2];
+
+    /* Some operands are defined with "rZ" constraint, a register or
+       the zero register. These need not actually test args[I] == 0. */
+    #define REG0(I) (const_args[I] ? TCG_REG_ZERO : (TCGReg)args[I])
+
+    switch (opc) {
+    case INDEX_op_exit_tb:
+        /* Reuse the zeroing that exists for goto_ptr. */
+        if (a0 == 0) {
+            tcg_out_goto_long(s, tcg_code_gen_epilogue);
+        } else {
+            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
+            tcg_out_goto_long(s, tb_ret_addr);
+        }
+        break;
+
+    case INDEX_op_goto_tb:
+        if (s->tb_jmp_insn_offset != NULL) {
+            /* TCG_TARGET_HAS_direct_jump */
+            tcg_debug_assert(0);
+            /* not support here */
+        } else {
+            /* !TCG_TARGET_HAS_direct_jump */
+            tcg_debug_assert(s->tb_jmp_target_addr != NULL);
+            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP, TCG_REG_ZERO, (uintptr_t)(s->tb_jmp_target_addr + a0));
+        }
+        tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, 0);
+        set_jmp_reset_offset(s, a0);
+        break;
+
+    case INDEX_op_goto_ptr:
+        tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, a0, 0);
+        break;
+
+    case INDEX_op_br:
+        tcg_out_goto_label(s, arg_label(a0));
+        break;
+
+    case INDEX_op_ld8u_i32:
+    case INDEX_op_ld8u_i64:
+        tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 0);
+        break;
+    case INDEX_op_ld8s_i32:
+    case INDEX_op_ld8s_i64:
+        tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1);
+        break;
+    case INDEX_op_ld16u_i32:
+    case INDEX_op_ld16u_i64:
+        tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 0);
+        break;
+    case INDEX_op_ld16s_i32:
+    case INDEX_op_ld16s_i64:
+        tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1);
+        break;
+    case INDEX_op_ld_i32:
+        tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 1);
+        break;
+    case INDEX_op_ld32u_i64:
+        tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0);
+        break;
+    case INDEX_op_ld32s_i64:
+        tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 1);
+        break;
+    case INDEX_op_ld_i64:
+        tcg_out_ldst(s, OPC_LDL, a0, a1, a2, 1);
+        break;
+    case INDEX_op_st8_i32:
+    case INDEX_op_st8_i64:
+        tcg_out_ldst(s, OPC_STB, a0, a1, a2, 0);
+        break;
+    case INDEX_op_st16_i32:
+    case INDEX_op_st16_i64:
+        tcg_out_ldst(s, OPC_STH, a0, a1, a2, 0);
+        break;
+    case INDEX_op_st_i32:
+    case INDEX_op_st32_i64:
+        tcg_out_ldst(s, OPC_STW, a0, a1, a2, 0);
+        break;
+    case INDEX_op_st_i64:
+        tcg_out_ldst(s, OPC_STL, a0, a1, a2, 0);
+        break;
+
+    case INDEX_op_add_i32:
+        a2 = (int32_t)a2;
+        if (c2) {
+            tcg_out_addsubi(s, ext, a0, a1, a2);
+        } else {
+            tcg_out_insn_simpleReg(s, OPC_ADDL, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_add_i64:
+        if (c2) {
+            tcg_out_addsubi(s, ext, a0, a1, a2);
+        } else {
+            tcg_out_insn_simpleReg(s, OPC_ADDL, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_sub_i32:
+        a2 = (int32_t)a2;
+        if (c2) {
+            tcg_out_addsubi(s, ext, a0, a1, -a2);
+        } else {
+            tcg_out_insn_simpleReg(s, OPC_SUBL, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_sub_i64:
+        if (c2) {
+            tcg_out_addsubi(s, ext, a0, a1, -a2);
+        } else {
+            tcg_out_insn_simpleReg(s, OPC_SUBL, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_neg_i64:
+    case INDEX_op_neg_i32:
+        tcg_out_insn_bitReg(s, OPC_SUBL, a0, TCG_REG_ZERO, a1);
+        break;
+
+    case INDEX_op_and_i32:
+        a2 = (int32_t)a2;
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_and_i64:
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_andc_i32: /* disabled path: asserts before emitting */
+        a2 = (int32_t)a2;
+        tcg_debug_assert(0);
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, ~a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_BIC, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_andc_i64:
+        tcg_debug_assert(0);
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, ~a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_BIC, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_or_i32:
+        a2 = (int32_t)a2;
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_or_i64:
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_orc_i32: /* disabled path: asserts before emitting */
+        a2 = (int32_t)a2;
+        tcg_debug_assert(0);
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_orc_i64:
+        tcg_debug_assert(0);
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_xor_i32:
+        a2 = (int32_t)a2;
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_xor_i64:
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_eqv_i32: /* disabled path: asserts before emitting */
+        a2 = (int32_t)a2;
+        tcg_debug_assert(0);
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, ~a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_EQV, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_eqv_i64:
+        tcg_debug_assert(0);
+        if (c2) {
+            tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, ~a2);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_EQV, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_not_i64:
+    case INDEX_op_not_i32:
+        tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1);
+        break;
+
+    case INDEX_op_mul_i64:
+    case INDEX_op_mul_i32:
+        tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2);
+        break;
+
+    case INDEX_op_div_i64: /* a0=a1/a2 singed divide*/
+    case INDEX_op_div_i32:
+        tcg_debug_assert(0);
+        break;
+    case INDEX_op_divu_i64: /* a0=a1/a2 unsigned divide */
+    case INDEX_op_divu_i32:
+        tcg_debug_assert(0);
+        break;
+
+    case INDEX_op_rem_i64: /* if a1=17,a2=4, 17/4=4...1, a0=1 */
+    case INDEX_op_rem_i32:
+        tcg_debug_assert(0);
+        break;
+    case INDEX_op_remu_i64:
+    case INDEX_op_remu_i32:
+        tcg_debug_assert(0);
+        break;
+
+    case INDEX_op_shl_i64:
+    case INDEX_op_shl_i32: /* sw logical left*/
+        if (c2) {
+            int bits = ext ? 64 : 32;
+            int max = bits - 1;
+            tcg_out_insn_bitImm(s, OPC_SLL_I, a0, a1, a2&max);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_shr_i64:
+    case INDEX_op_shr_i32: /* sw logical right */
+        if (c2) {
+            int bits = ext ? 64 : 32;
+            int max = bits - 1;
+            tcg_out_insn_bitImm(s, OPC_SRL_I, a0, a1, a2&max);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_sar_i64:
+    case INDEX_op_sar_i32: /* sw arithmetic right*/
+        if (c2) {
+            int bits = ext ? 64 : 32;
+            int max = bits - 1;
+            tcg_out_insn_bitImm(s, OPC_SRA_I, a0, a1, a2&max);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_SRA, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_rotr_i64:
+    case INDEX_op_rotr_i32: /* loop shift */
+        if (c2) {/* loop right shift a2*/
+            tcg_out_rotr_Imm(s, ext, a0, a1, a2);
+        } else {
+            tcg_out_rotr_Reg(s, ext, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotl_i32: /* loop shift */
+        if (c2) {/* loop left shift a2*/
+            tcg_out_rotl_Imm(s, ext, a0, a1, a2);
+        } else {
+            tcg_out_rotl_Reg(s, ext, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_clz_i64: /* counting leading zero numbers */
+    case INDEX_op_clz_i32:
+        tcg_out_cltz(s, OPC_CTLZ, ext, a0, a1, a2, c2);
+        break;
+    case INDEX_op_ctz_i64: /* counting tailing zero numbers */
+    case INDEX_op_ctz_i32:
+        tcg_out_cltz(s, OPC_CTTZ, ext, a0, a1, a2, c2);
+        break;
+
+    case INDEX_op_brcond_i32:
+        a1 = (int32_t)a1;
+        tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
+        break;
+
+    case INDEX_op_brcond_i64:
+        tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
+        break;
+
+    case INDEX_op_setcond_i32:
+        a2 = (int32_t)a2;
+        tcg_out_setcond(s, args[3], a0, a1, a2);
+        break;
+
+    case INDEX_op_setcond_i64:
+        tcg_out_setcond(s, args[3], a0, a1, a2);
+        break;
+
+    case INDEX_op_movcond_i32:
+        a2 = (int32_t)a2;
+        tcg_out_movcond(s, args[5], a0, a1, a2, c2, REG0(3), REG0(4));
+        break;
+
+    /* movcond_i64: same emission as i32, but a2 is not truncated */
+    case INDEX_op_movcond_i64:
+        tcg_out_movcond(s, args[5], a0, a1, a2, c2, REG0(3), REG0(4));
+        break;
+
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld_i64:
+        tcg_out_qemu_ld(s, a0, a1, a2, ext);
+        break;
+    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st_i64:
+        tcg_out_qemu_st(s, REG0(0), a1, a2);
+        break;
+
+    case INDEX_op_bswap64_i64: /* 0x123456789abcdef--->0xefcdab8967452301 */
+        tcg_debug_assert(0);
+        tcg_out_bswap64(s, a0, a1);
+        break;
+    case INDEX_op_bswap32_i64: /* 0x123456789abcdef--->0x67452301efcdab89 */
+        tcg_debug_assert(0);
+        tcg_out_bswap32u(s, a0, a1);
+        break;
+    case INDEX_op_bswap32_i32:
+        tcg_debug_assert(0);
+        break;
+    case INDEX_op_bswap16_i64: /* 0x123456789abcdef--->0x23016745ab89efcd */
+    case INDEX_op_bswap16_i32:
+        tcg_debug_assert(0);
+        break;
+
+    case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8s_i32:
+        tcg_out_insn_simpleReg(s, OPC_SEXTB, a0, TCG_REG_ZERO, a1);
+        break;
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16s_i32:
+        tcg_out_insn_simpleReg(s, OPC_SEXTH, a0, TCG_REG_ZERO, a1);
+        break;
+    case INDEX_op_ext_i32_i64:
+    case INDEX_op_ext32s_i64:
+        tcg_out_insn_simpleReg(s, OPC_ADDW, a0, TCG_REG_ZERO, a1);
+        break;
+    case INDEX_op_ext8u_i64:
+    case INDEX_op_ext8u_i32:
+        tcg_out_insn_simpleImm(s, OPC_EXT0B_I, a0, a1, 0x0);
+        break;
+    case INDEX_op_ext16u_i64:
+    case INDEX_op_ext16u_i32:
+        tcg_out_insn_simpleImm(s, OPC_EXT1B_I, a0, a1, 0x0);
+        break;
+    case INDEX_op_extu_i32_i64:
+    case INDEX_op_ext32u_i64:
+        tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
+        break;
+
+    case INDEX_op_deposit_i64:
+    case INDEX_op_deposit_i32:
+        tcg_out_dep(s, a0, a2, args[3], args[4]);
+        break;
+
+    case INDEX_op_extract_i64:
+    case INDEX_op_extract_i32:
+        tcg_out_extract(s, a0, a1, a2, args[3]);
+        break;
+
+    case INDEX_op_sextract_i64:
+    case INDEX_op_sextract_i32:
+        tcg_debug_assert(0);
+        break;
+
+    case INDEX_op_extract2_i64:
+    case INDEX_op_extract2_i32: /* extract REG0(2) right args[3] bit to REG0(1) left ,save to a0*/
+        tcg_debug_assert(0);
+        break;
+
+    case INDEX_op_add2_i32:
+        tcg_debug_assert(0);
+        break;
+    case INDEX_op_add2_i64:
+        tcg_debug_assert(0);
+        break;
+    case INDEX_op_sub2_i32:
+        tcg_debug_assert(0);
+        break;
+    case INDEX_op_sub2_i64:
+        tcg_debug_assert(0);
+        break;
+
+    case INDEX_op_muluh_i64:
+        tcg_out_insn_simpleReg(s, OPC_UMULH, a0, a1, a2);
+        break;
+    case INDEX_op_mulsh_i64: /* no native signed mulh: synthesized from UMULH */
+        tcg_out_mulsh64(s, a0, a1, a2);
+        break;
+
+    case INDEX_op_mb:
+        break;
+
+    case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+    case INDEX_op_mov_i64:
+    case INDEX_op_call: /* Always emitted via tcg_out_call. */
+    default:
+        g_assert_not_reached();
+    }
+
+#undef REG0
+}
+
+
+
+/* sw
+ * Count leading/trailing zeros of rn into rd; b supplies the value to use
+ * when rn == 0 (a register, or a constant: bit-width, -1, 0, or other). */
+static void tcg_out_cltz(TCGContext *s, SW_64Insn opc_clz, TCGType ext, TCGReg rd,
+                           TCGReg rn, TCGArg b, bool const_b)
+{
+    /* cond1. b is a const, and b=64 or b=32 */
+    if (const_b && b == (ext ? 64 : 32)) {
+        /* count rn zero numbers, and writes to rd */
+        tcg_out_insn_simpleReg(s, opc_clz, rd, TCG_REG_ZERO, rn);
+    }else {
+        /* TCG_REG_TMP= counting rn heading/tailing zero numbers */
+        tcg_out_insn_simpleReg(s, opc_clz, TCG_REG_TMP, TCG_REG_ZERO, rn);
+
+        if (const_b) {
+            if (b == -1) {
+                /* cond2. b is const and b=-1 */
+                /* if rn != 0 , rd= counting rn heading/tailing zero numbers, else rd = 0xffffffffffffffff*/
+                tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_ZERO);
+                tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, TCG_REG_TMP2);
+            }
+            else if (b == 0) {
+                /* cond3. b is const and b=0 */
+                /* if rn != 0 , rd=counting rn heading/tailing zero numbers , else rd = TCG_REG_ZERO */
+                tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, TCG_REG_ZERO);
+            } else {
+                /* cond4. b is const */
+                tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP2, b);
+                /* if rn != 0 , rd=counting rn heading/tailing zero numbers , else mov b to rd */
+                tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, TCG_REG_TMP2);
+            }
+        }
+        else {
+            /* if b is register */
+            tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, b);
+        }
+    }
+}
+
+/* sw
+ * Byte-swap unsigned 16-bit value: ab -> ba.
+ * NOTE(review): uses rn/rd directly as scratch -- verify behavior when rd == rn. */
+static inline void tcg_out_bswap16u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    TCGReg TCG_TMP0 = rn;
+    TCGReg TCG_TMP1 = rd;
+    /*t1=00b0*/
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP0, 8);
+    /*t0=(0000)000a*/
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP0, TCG_TMP0, 8);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP0, TCG_TMP0, 0x1);
+    /*t1=00ba*/
+    tcg_out_insn_simpleReg(s, OPC_BIS, TCG_TMP1, TCG_TMP1, TCG_TMP0);
+}
+
+/* sw
+ * Byte-swap and sign-extend 16-bit value: ab -> ssba.
+ * NOTE(review): the final result is left in TCG_REG_TMP, never copied to rd -- verify. */
+static inline void tcg_out_bswap16s(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    TCGReg TCG_TMP0 = rn;
+    TCGReg TCG_TMP1 = TCG_REG_TMP;
+    TCGReg TCG_TMP2 = rn;
+    /*t1=(ssss)ssb0*/
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP0, 8);
+    tcg_out_insn_simpleImm(s, OPC_ZAP_I, TCG_TMP1, TCG_TMP1, 0x2);
+    tcg_out_insn_simpleReg(s, OPC_SEXTH, TCG_TMP1, TCG_REG_ZERO, TCG_TMP1);
+    /*t2=(0000)000a*/
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP2, TCG_TMP0, 8);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP2, 0x1);
+    /*t1=(ssss)ssba*/
+    tcg_out_insn_simpleReg(s, OPC_BIS, TCG_TMP1, TCG_TMP1, TCG_TMP2);
+}
+
+
+/* sw
+ * Byte-swap and sign-extend 32-bit value: abcd -> ssdcba.
+ */
+static inline void tcg_out_bswap32s(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    TCGReg TCG_TMP0 = rn;
+    TCGReg TCG_TMP3 = rd;
+    TCGReg TCG_TMP1 = TCG_REG_TMP;
+    TCGReg TCG_TMP2 = TCG_REG_TMP2;
+    /*swap32 -- 32-bit swap. a0 = abcd.*/
+
+    /* t3 = (ssss)d000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP3, TCG_TMP0, 24);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP3, TCG_TMP3, 0x0f);
+    tcg_out_insn_simpleReg(s, OPC_SEXTB, TCG_TMP1, TCG_REG_ZERO, TCG_TMP0);
+    tcg_out_insn_simpleImm(s, OPC_ZAP_I, TCG_TMP1, TCG_TMP1, 0x0f);
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+
+    /* t1 = 000a */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 24);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x1);
+
+    /* t2 = 00c0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP0, 0x2);
+
+    /* t3 = (ssss)d00a */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+
+    /* t1 = 0abc */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 8);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x7);
+
+    /* t2 = 0c00 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 8);
+    /* t1 = 00b0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
+    /* t3 = (ssss)dc0a */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+    /* t3 = (ssss)dcba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+}
+
+/* sw
+ * Byte-swap unsigned 32-bit value: abcd -> dcba (upper 32 bits zero).
+ */
+static void tcg_out_bswap32u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    TCGReg TCG_TMP0 = rn;
+    TCGReg TCG_TMP3 = rd;
+    TCGReg TCG_TMP1 = TCG_REG_TMP;
+    TCGReg TCG_TMP2 = TCG_REG_TMP2;
+
+    /*bswap32u -- unsigned 32-bit swap. a0 = ....abcd.*/
+    /* t1 = (0000)000d */
+    tcg_out_insn_bitImm(s, OPC_AND_I, TCG_TMP1, TCG_TMP0, 0xff);
+    /* t3 = 000a */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP3, TCG_TMP0, 24);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP3, TCG_TMP3, 0x1);
+    /* t1 = (0000)d000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP1, 24);
+    /* t2 = 00c0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP0, 0x2);
+    /* t3 = d00a */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+    /* t1 = 0abc */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 8);
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x7);
+    /* t2 = 0c00 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 8);
+    /* t1 = 00b0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
+    /* t3 = dc0a */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+    /* t3 = dcba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+}
+
+
+
+/* sw
+ * Byte-swap 64-bit value: abcdefgh -> hgfedcba.
+ */
+static void tcg_out_bswap64(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+
+    TCGReg TCG_TMP0 = rn;
+    TCGReg TCG_TMP3 = rd;
+    TCGReg TCG_TMP1 = TCG_REG_TMP;
+    TCGReg TCG_TMP2 = TCG_REG_TMP2;
+
+    /* bswap64 -- 64-bit swap. a0 = abcdefgh*/
+
+    /* t3 = h0000000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP3, TCG_TMP0, 56);
+    /* t1 = 0000000a */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 56);
+    /* t2 = 000000g0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP0, 0x2);
+    /* t3 = h000000a */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+    /* t1 = 00000abc */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 40);
+    /* t2 = 0g000000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 40);
+    /* t1 = 000000b0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
+    /* t3 = hg00000a */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+    /* t2 = 0000abcd */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP2, TCG_TMP0, 32);
+    /* t3 = hg0000ba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+    /* t1 = 000000c0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP2, 0x2);
+    /* t2 = 0000000d */
+    tcg_out_insn_bitImm(s, OPC_AND_I, TCG_TMP2, TCG_TMP2, 0xff);
+    /* t1 = 00000c00 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP1, 8);
+    /* t2 = 0000d000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 24);
+    /* t3 = hg000cba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+    /* t1 = 00abcdef */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 16);
+    /* t3 = hg00dcba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+    /* t2 = 0000000f */
+    tcg_out_insn_bitImm(s, OPC_AND_I, TCG_TMP2, TCG_TMP1, 0xff);
+    /* t1 = 000000e0 */
+    tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
+    /* t2 = 00f00000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 40);
+    /* t1 = 000e0000 */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP1, 24);
+    /* t3 = hgf0dcba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+    /* t3 = hgfedcba */
+    tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+
+}
+
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi, TCGType ext) /* guest memory load; user-mode (non-softmmu) path only */
+{
+#ifndef CONFIG_SOFTMMU
+    MemOp memop = get_memop(oi);
+    const TCGType otype = TCG_TYPE_I64;
+
+    if (USE_GUEST_BASE) {
+        tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_GUEST_BASE, TCG_REG_GUEST_BASE, addr_reg); /* NOTE(review): overwrites TCG_REG_GUEST_BASE with base+addr, corrupting the base for later accesses -- verify */
+        tcg_out_qemu_ld_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, 0);
+    } else {
+        tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, 0);
+    }
+#endif /* CONFIG_SOFTMMU */
+
+}
+
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                              MemOpIdx oi) /* guest memory store; user-mode (non-softmmu) path only */
+{
+#ifndef CONFIG_SOFTMMU
+    MemOp memop = get_memop(oi);
+    const TCGType otype = TCG_TYPE_I64;
+
+    if (USE_GUEST_BASE) {
+        tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_GUEST_BASE, TCG_REG_GUEST_BASE, addr_reg); /* NOTE(review): overwrites TCG_REG_GUEST_BASE with base+addr, corrupting the base for later accesses -- verify */
+        tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, 0);
+    } else {
+        tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, 0);
+    }
+#endif /* CONFIG_SOFTMMU */
+}
+
+
+/* sw
+ * ret = (arg1 cond arg2) ? 1 : 0; thin wrapper over tcg_out_cond_cmp().
+ */
+static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
+                             TCGReg arg1, TCGReg arg2)
+{
+    switch(cond) {
+    case TCG_COND_EQ:
+    case TCG_COND_LT:
+    case TCG_COND_LE:
+    case TCG_COND_LTU:
+    case TCG_COND_LEU:
+    case TCG_COND_NE:
+    case TCG_COND_GE:
+    case TCG_COND_GT:
+    case TCG_COND_GEU:
+    case TCG_COND_GTU:
+        tcg_out_cond_cmp(s, cond, ret, arg1, arg2, 0); /* arg2 always a register here (const_b = 0) */
+        break;
+    default:
+        tcg_abort();
+        break;
+    }
+}
+/* sw
+ * ret = (a1 cond a2) ? v1 : v2, selecting via the 0/1 compare result in TCG_REG_TMP.
+ */
+static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
+                             TCGReg a1, TCGReg a2, bool const_b, TCGReg v1, TCGReg v2)
+{
+    tcg_out_cond_cmp(s, cond, TCG_REG_TMP, a1, a2, const_b);
+    tcg_out_insn_complexReg(s, OPC_SELLBS, TCG_REG_TMP, ret, v1, v2);
+}
+
+
+
+/* sw
+ * Extract bit-field rn[lsb, lsb+len-1] into rd[0, len-1], zero-extended.
+ */
+static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len)
+{
+    //build mask 000..111..000: len ones starting at bit lsb
+    tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO);
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len);
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb);
+    /* get rn[lsb, lsb+len-1]-->rd[lsb, lsb+len-1] */
+    tcg_out_insn_bitReg(s, OPC_AND, rd, rn, TCG_REG_TMP);
+
+    /* rd[lsb, lsb+len-1] --> rd[0, len-1] */
+    tcg_out_insn_bitImm(s, OPC_SRL_I, rd, rd, lsb);
+}
+
+
+/* sw
+ * Deposit: rd = rd[63:msb+1] : rn[msb,lsb] : rd[lsb-1,0]
+ * where len = msb - lsb + 1.
+ */
+static void tcg_out_dep(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len)
+{
+
+    //build mask 000..111..000: len ones starting at bit lsb
+    tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO);
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len);
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb);
+
+    /* TCG_REG_TMP2 = rn[msb,lsb] */
+    tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, 64-len);
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, TCG_REG_TMP2, 64-len-lsb);
+
+    /* clear rd[msb,lsb] */
+    tcg_out_insn_bitReg(s, OPC_BIC, rd, rd, TCG_REG_TMP);
+    /* rd = rd[63:msb+1]:rn[msb,lsb]:rd[lsb-1,0] */
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, rd, TCG_REG_TMP2);
+}
+
+/* sw
+ * Signed 64x64 -> high 64 bits: rd = (s64)rn * (s64)rm >> 64, built from
+ * UMULH with the standard sign corrections (subtract rm when rn < 0 and rn
+ * when rm < 0).  Safe when rd aliases rn or rm: rd is only written last.
+ */
+static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    tcg_out_insn_simpleReg(s, OPC_UMULH, TCG_REG_TMP, rn, rm);
+
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, 63); /* sign bit of rn */
+    tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rm);
+    tcg_out_insn_simpleReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP2);
+
+    tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, 63); /* sign bit of rm */
+    tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rn);
+    tcg_out_insn_simpleReg(s, OPC_SUBL, rd, TCG_REG_TMP, TCG_REG_TMP2);
+}
+
+typedef struct { /* DWARF debug-frame image handed to GDB for JIT unwinding */
+    DebugFrameHeader h;
+    uint8_t fde_def_cfa[4];
+    uint8_t fde_reg_ofs[8 * 2];
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_SW_64
+/* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
+   which is good because they're really quite complicated for SW_64. */
+
+static const DebugFrame debug_frame = {
+    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
+    .h.cie.id = -1,
+    .h.cie.version = 1,
+    .h.cie.code_align = 1,
+    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
+    .h.cie.return_column = TCG_REG_RA,
+
+    /* Total FDE size does not include the "len" member. */
+    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
+
+    .fde_def_cfa = {
+        12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
+        (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+        (FRAME_SIZE >> 7)
+    },
+    .fde_reg_ofs = {                   /* DW_CFA_offset for each callee-saved register */
+        0x80 + 14, 1, /* DW_CFA_offset, */
+        0x80 + 13, 2, /* DW_CFA_offset, */
+        0x80 + 12, 3, /* DW_CFA_offset, */
+        0x80 + 11, 4, /* DW_CFA_offset, */
+        0x80 + 10, 5, /* DW_CFA_offset, */
+        0x80 + 9, 6, /* DW_CFA_offset, */
+        0x80 + 26, 7, /* DW_CFA_offset, ra, -24 */
+        0x80 + 15, 8, /* DW_CFA_offset, fp, -8 */
+    }
+};
+
+void tcg_register_jit(const void *buf, size_t buf_size) /* register the generated-code buffer and its unwind info with GDB */
+{
+    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/tcg/sw64/tcg-target.h b/tcg/sw64/tcg-target.h
new file mode 100644
index 0000000000..3093e4fece
--- /dev/null
+++ b/tcg/sw64/tcg-target.h
@@ -0,0 +1,121 @@
+/*
+ * Initial TCG Implementation for sw_64
+ *
+ */
+
+#ifndef SW_64_TCG_TARGET_H
+#define SW_64_TCG_TARGET_H
+
+#define TCG_TARGET_INSN_UNIT_SIZE 4
+
+typedef enum {
+    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
+    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,
+    TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
+    TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
+    TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19,
+    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
+    TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
+    TCG_REG_X28, TCG_REG_X29, TCG_REG_X30, TCG_REG_X31,
+
+    TCG_REG_F0=32, TCG_REG_F1, TCG_REG_F2, TCG_REG_F3,
+    TCG_REG_F4, TCG_REG_F5, TCG_REG_F6, TCG_REG_F7,
+    TCG_REG_F8, TCG_REG_F9, TCG_REG_F10, TCG_REG_F11,
+    TCG_REG_F12, TCG_REG_F13, TCG_REG_F14, TCG_REG_F15,
+    TCG_REG_F16, TCG_REG_F17, TCG_REG_F18, TCG_REG_F19,
+    TCG_REG_F20, TCG_REG_F21, TCG_REG_F22, TCG_REG_F23,
+    TCG_REG_F24, TCG_REG_F25, TCG_REG_F26, TCG_REG_F27,
+    TCG_REG_F28, TCG_REG_F29, TCG_REG_F30, TCG_REG_F31,
+
+    /* Aliases. */
+    TCG_REG_FP = TCG_REG_X15,
+    TCG_REG_RA = TCG_REG_X26,
+    TCG_REG_GP = TCG_REG_X29,
+    TCG_REG_SP = TCG_REG_X30,
+    TCG_REG_ZERO = TCG_REG_X31,
+    TCG_AREG0 = TCG_REG_X9,
+} TCGReg;
+
+#define TCG_TARGET_NB_REGS 64
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_ALIGN_ARGS 1 /* keep 64-bit args register-aligned */
+#define TCG_TARGET_CALL_STACK_OFFSET 0 /* stack args start at sp+0 */
+#define TCG_TARGET_HAS_neg_i64 1
+#define TCG_TARGET_HAS_direct_jump 0
+#define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+#define TCG_TARGET_HAS_not_i32 1
+#define TCG_TARGET_HAS_neg_i32 1
+#define TCG_TARGET_HAS_div_i32 1
+#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_rem_i32 0
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 0
+#define TCG_TARGET_HAS_extract2_i32 0
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_ext8s_i32 0
+#define TCG_TARGET_HAS_ext16s_i32 0
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
+#define TCG_TARGET_HAS_bswap16_i32 0
+#define TCG_TARGET_HAS_bswap32_i32 0
+#define TCG_TARGET_HAS_andc_i32 0
+#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 0
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_orc_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_movcond_i64 1
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 0
+#define TCG_TARGET_HAS_div2_i64 0
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_extract2_i64 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_ext8s_i64 0
+#define TCG_TARGET_HAS_ext16s_i64 0
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#define TCG_TARGET_HAS_bswap16_i64 0
+#define TCG_TARGET_HAS_bswap32_i64 0
+#define TCG_TARGET_HAS_bswap64_i64 0
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_andc_i64 0
+#define TCG_TARGET_HAS_orc_i64 1
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
+#define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 0
+/* optional instructions */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+#define TCG_TARGET_NEED_POOL_LABELS
+#endif /* SW_64_TCG_TARGET_H */
diff --git a/tcg/sw64/tcg-target.opc.h b/tcg/sw64/tcg-target.opc.h
new file mode 100644
index 0000000000..bce30accd9
--- /dev/null
+++ b/tcg/sw64/tcg-target.opc.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2019 Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion.  These will be
+ * emitted by tcg_expand_vec_op.  For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+
+DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
+DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC)
--
2.27.0