binutils/Sw64-binutils-Add-Sw64-support.patch
2024-12-03 14:40:27 +08:00

diff --git a/Makefile.in b/Makefile.in
index 38f1f9ab..b0fa1958 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -701,6 +701,7 @@ all:
#### host and target specific makefile fragments come in here.
@target_makefile_frag@
@alphaieee_frag@
+@sw_64ieee_frag@
@ospace_frag@
@host_makefile_frag@
###
diff --git a/Makefile.tpl b/Makefile.tpl
index 88db8f44..9cea1974 100644
--- a/Makefile.tpl
+++ b/Makefile.tpl
@@ -624,6 +624,7 @@ all:
#### host and target specific makefile fragments come in here.
@target_makefile_frag@
@alphaieee_frag@
+@sw_64ieee_frag@
@ospace_frag@
@host_makefile_frag@
###
diff --git a/bfd/Makefile.am b/bfd/Makefile.am
index 5c5fdefd..4009f0a2 100644
--- a/bfd/Makefile.am
+++ b/bfd/Makefile.am
@@ -97,6 +97,7 @@ BFD64_LIBS_CFILES = archive64.c
ALL_MACHINES = \
cpu-aarch64.lo \
cpu-alpha.lo \
+ cpu-sw_64.lo \
cpu-amdgcn.lo \
cpu-arc.lo \
cpu-arm.lo \
@@ -180,6 +181,7 @@ ALL_MACHINES = \
ALL_MACHINES_CFILES = \
cpu-aarch64.c \
cpu-alpha.c \
+ cpu-sw_64.c \
cpu-amdgcn.c \
cpu-arc.c \
cpu-arm.c \
@@ -543,6 +545,7 @@ BFD64_BACKENDS = \
aix5ppc-core.lo \
aout64.lo \
coff-alpha.lo \
+ coff-sw_64.lo \
coff-x86_64.lo \
coff64-rs6000.lo \
elf32-ia64.lo \
@@ -550,6 +553,7 @@ BFD64_BACKENDS = \
elf32-score.lo \
elf32-score7.lo \
elf64-alpha.lo \
+ elf64-sw_64.lo \
elf64-amdgcn.lo \
elf64-gen.lo \
elf64-hppa.lo \
@@ -594,12 +598,14 @@ BFD64_BACKENDS_CFILES = \
aix5ppc-core.c \
aout64.c \
coff-alpha.c \
+ coff-sw_64.c \
coff-x86_64.c \
coff64-rs6000.c \
elf32-mips.c \
elf32-score.c \
elf32-score7.c \
elf64-alpha.c \
+ elf64-sw_64.c \
elf64-amdgcn.c \
elf64-gen.c \
elf64-hppa.c \
diff --git a/bfd/Makefile.in b/bfd/Makefile.in
index 4edfedee..e929a9e4 100644
--- a/bfd/Makefile.in
+++ b/bfd/Makefile.in
@@ -552,6 +552,7 @@ BFD64_LIBS_CFILES = archive64.c
ALL_MACHINES = \
cpu-aarch64.lo \
cpu-alpha.lo \
+ cpu-sw_64.lo \
cpu-amdgcn.lo \
cpu-arc.lo \
cpu-arm.lo \
@@ -635,6 +636,7 @@ ALL_MACHINES = \
ALL_MACHINES_CFILES = \
cpu-aarch64.c \
cpu-alpha.c \
+ cpu-sw_64.c \
cpu-amdgcn.c \
cpu-arc.c \
cpu-arm.c \
@@ -1000,6 +1002,7 @@ BFD64_BACKENDS = \
aix5ppc-core.lo \
aout64.lo \
coff-alpha.lo \
+ coff-sw_64.lo \
coff-x86_64.lo \
coff64-rs6000.lo \
elf32-ia64.lo \
@@ -1007,6 +1010,7 @@ BFD64_BACKENDS = \
elf32-score.lo \
elf32-score7.lo \
elf64-alpha.lo \
+ elf64-sw_64.lo \
elf64-amdgcn.lo \
elf64-gen.lo \
elf64-hppa.lo \
@@ -1051,12 +1055,14 @@ BFD64_BACKENDS_CFILES = \
aix5ppc-core.c \
aout64.c \
coff-alpha.c \
+ coff-sw_64.c \
coff-x86_64.c \
coff64-rs6000.c \
elf32-mips.c \
elf32-score.c \
elf32-score7.c \
elf64-alpha.c \
+ elf64-sw_64.c \
elf64-amdgcn.c \
elf64-gen.c \
elf64-hppa.c \
@@ -1439,6 +1445,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cf-i386lynx.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cisco-core.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-alpha.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-sw_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-bfd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-go32.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-i386.Plo@am__quote@
@@ -1459,6 +1466,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/corefile.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-aarch64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-alpha.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-sw_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-amdgcn.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-arc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-arm.Plo@am__quote@
@@ -1623,6 +1631,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf32.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-aarch64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-alpha.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-sw_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-amdgcn.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-bpf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-gen.Plo@am__quote@
diff --git a/bfd/archures.c b/bfd/archures.c
index 5a2a76c3..a968c687 100644
--- a/bfd/archures.c
+++ b/bfd/archures.c
@@ -24,6 +24,9 @@
#include "libbfd.h"
#include "safe-ctype.h"
+/* Add the sw_64 target.
+   Add CPUs sw6a (1621) and sw6b (3231).  */
+
/*
SECTION
@@ -303,6 +306,11 @@ DESCRIPTION
.#define bfd_mach_alpha_ev4 0x10
.#define bfd_mach_alpha_ev5 0x20
.#define bfd_mach_alpha_ev6 0x30
+.#ifdef TARGET_SW_64
+. bfd_arch_sw_64, {* Sw_64 *}
+.#define bfd_mach_sw_64_sw6a 4
+.#define bfd_mach_sw_64_sw6b 8
+.#endif
. bfd_arch_arm, {* Advanced Risc Machines ARM. *}
.#define bfd_mach_arm_unknown 0
.#define bfd_mach_arm_2 1
@@ -619,6 +627,7 @@ DESCRIPTION
extern const bfd_arch_info_type bfd_aarch64_arch;
extern const bfd_arch_info_type bfd_alpha_arch;
+extern const bfd_arch_info_type bfd_sw_64_arch;
extern const bfd_arch_info_type bfd_amdgcn_arch;
extern const bfd_arch_info_type bfd_arc_arch;
extern const bfd_arch_info_type bfd_arm_arch;
@@ -707,6 +716,9 @@ static const bfd_arch_info_type * const bfd_archures_list[] =
#else
&bfd_aarch64_arch,
&bfd_alpha_arch,
+#ifdef TARGET_SW_64
+ &bfd_sw_64_arch,
+#endif
&bfd_amdgcn_arch,
&bfd_arc_arch,
&bfd_arm_arch,
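
For orientation, here is a minimal sketch (not part of the patch) of how a BFD client could reach the architecture entry that archures.c now registers. It assumes a tree configured with TARGET_SW_64 defined, uses the standard bfd_lookup_arch query, and the helper name is made up for illustration.

#include "bfd.h"

/* Look up the sw6a entry registered via bfd_archures_list above and
   return its printable name, or NULL if the backend is absent.  */
static const char *
sw_64_arch_name (void)
{
#ifdef TARGET_SW_64
  const bfd_arch_info_type *info
    = bfd_lookup_arch (bfd_arch_sw_64, bfd_mach_sw_64_sw6a);
  return info ? info->printable_name : NULL;
#else
  return NULL;
#endif
}
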
diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h
index b34c8ef9..12dac144 100644
--- a/bfd/bfd-in2.h
+++ b/bfd/bfd-in2.h
@@ -1556,6 +1556,12 @@ enum bfd_architecture
#define bfd_mach_alpha_ev4 0x10
#define bfd_mach_alpha_ev5 0x20
#define bfd_mach_alpha_ev6 0x30
+#ifdef TARGET_SW_64
+ bfd_arch_sw_64, /* Sw_64 */
+#define bfd_mach_sw_64_sw6a 4
+#define bfd_mach_sw_64_sw6b 8
+#define bfd_mach_sw_64_sw8a 12
+#endif
bfd_arch_arm, /* Advanced Risc Machines ARM. */
#define bfd_mach_arm_unknown 0
#define bfd_mach_arm_2 1
@@ -3523,6 +3529,111 @@ between two procedure entry points is < 2^21, or else a hint. */
BFD_RELOC_ALPHA_TPREL_LO16,
BFD_RELOC_ALPHA_TPREL16,
+#ifdef TARGET_SW_64
+/* Sw_64 ECOFF and ELF relocations. Some of these treat the symbol or
+"addend" in some special way.
+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
+writing; when reading, it will be the absolute section symbol. The
+addend is the displacement in bytes of the "lda" instruction from
+the "ldah" instruction (which is at the address of this reloc). */
+ BFD_RELOC_SW_64_GPDISP_HI16,
+
+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
+with GPDISP_HI16 relocs. The addend is ignored when writing the
+relocations out, and is filled in with the file's GP value on
+reading, for convenience. */
+ BFD_RELOC_SW_64_GPDISP_LO16,
+
+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
+relocation except that there is no accompanying GPDISP_LO16
+relocation. */
+ BFD_RELOC_SW_64_GPDISP,
+
+/* The Sw_64 LITERAL/LITUSE relocs are produced by a symbol reference;
+the assembler turns it into a LDQ instruction to load the address of
+the symbol, and then fills in a register in the real instruction.
+
+The LITERAL reloc, at the LDQ instruction, refers to the .lita
+section symbol. The addend is ignored when writing, but is filled
+in with the file's GP value on reading, for convenience, as with the
+GPDISP_LO16 reloc.
+
+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
+It should refer to the symbol to be referenced, as with 16_GOTOFF,
+but it generates output not based on the position within the .got
+section, but relative to the GP value chosen for the file during the
+final link stage.
+
+The LITUSE reloc, on the instruction using the loaded address, gives
+information to the linker that it might be able to use to optimize
+away some literal section references. The symbol is ignored (read
+as the absolute section symbol), and the "addend" indicates the type
+of instruction using the register:
+1 - "memory" fmt insn
+2 - byte-manipulation (byte offset reg)
+3 - jsr (target of branch) */
+ BFD_RELOC_SW_64_LITERAL,
+ BFD_RELOC_SW_64_ELF_LITERAL,
+ BFD_RELOC_SW_64_ELF_LITERAL_GOT,
+ BFD_RELOC_SW_64_LITUSE,
+
+/* The HINT relocation indicates a value that should be filled into the
+"hint" field of a jmp/jsr/ret instruction, for possible branch-
+prediction logic which may be provided on some processors. */
+ BFD_RELOC_SW_64_HINT,
+
+/* The LINKAGE relocation outputs a linkage pair in the object file,
+which is filled by the linker. */
+ BFD_RELOC_SW_64_LINKAGE,
+
+/* The CODEADDR relocation outputs a STO_CA in the object file,
+which is filled by the linker. */
+ BFD_RELOC_SW_64_CODEADDR,
+
+/* The GPREL_HI/LO relocations together form a 32-bit offset from the
+GP register. */
+ BFD_RELOC_SW_64_GPREL_HI16,
+ BFD_RELOC_SW_64_GPREL_LO16,
+
+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
+share a common GP, and the target address is adjusted for
+STO_SW_64_STD_GPLOAD. */
+ BFD_RELOC_SW_64_BRSGP,
+
+/* The NOP relocation outputs a NOP if the longword displacement
+between two procedure entry points is < 2^21. */
+ BFD_RELOC_SW_64_NOP,
+
+/* The BSR relocation outputs a BSR if the longword displacement
+between two procedure entry points is < 2^21. */
+ BFD_RELOC_SW_64_BSR,
+
+/* The LDA relocation outputs a LDA if the longword displacement
+between two procedure entry points is < 2^16. */
+ BFD_RELOC_SW_64_LDA,
+
+/* The BOH relocation outputs a BSR if the longword displacement
+between two procedure entry points is < 2^21, or else a hint. */
+ BFD_RELOC_SW_64_BOH,
+
+/* Sw_64 thread-local storage relocations. */
+ BFD_RELOC_SW_64_TLSGD,
+ BFD_RELOC_SW_64_TLSLDM,
+ BFD_RELOC_SW_64_DTPMOD64,
+ BFD_RELOC_SW_64_GOTDTPREL16,
+ BFD_RELOC_SW_64_DTPREL64,
+ BFD_RELOC_SW_64_DTPREL_HI16,
+ BFD_RELOC_SW_64_DTPREL_LO16,
+ BFD_RELOC_SW_64_DTPREL16,
+ BFD_RELOC_SW_64_GOTTPREL16,
+ BFD_RELOC_SW_64_TPREL64,
+ BFD_RELOC_SW_64_TPREL_HI16,
+ BFD_RELOC_SW_64_TPREL_LO16,
+ BFD_RELOC_SW_64_TPREL16,
+ BFD_RELOC_SW_64_TLSREL_GOT,
+ BFD_RELOC_SW_64_BR26,
+#endif
+
/* The MIPS jump instruction. */
BFD_RELOC_MIPS_JMP,
BFD_RELOC_MICROMIPS_JMP,
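
As a bridge to the ECOFF backend that follows, a hedged sketch of how these BFD_RELOC_SW_64_* codes are consumed: bfd_reloc_type_lookup hands the code to the target backend (sw_64_bfd_reloc_type_lookup below), which maps BFD_RELOC_SW_64_LITERAL onto SW_64_R_LITERAL in sw_64_howto_table. The helper here is illustrative only and assumes TARGET_SW_64 is defined.

#include "bfd.h"

/* Ask the backend for the howto behind the generic LITERAL code; for
   an sw_64 ECOFF bfd the returned entry's name field is "LITERAL".
   Returns NULL if the backend rejects the code.  */
static const char *
sw_64_literal_reloc_name (bfd *abfd)
{
  reloc_howto_type *howto
    = bfd_reloc_type_lookup (abfd, BFD_RELOC_SW_64_LITERAL);
  return howto ? howto->name : NULL;
}
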
diff --git a/bfd/coff-sw_64.c b/bfd/coff-sw_64.c
new file mode 100644
index 00000000..c5d515a5
--- /dev/null
+++ b/bfd/coff-sw_64.c
@@ -0,0 +1,2459 @@
+/* BFD back-end for SW_64 Extended-Coff files.
+ Copyright (C) 1993-2023 Free Software Foundation, Inc.
+ Modified from coff-mips.c by Steve Chamberlain <sac@cygnus.com> and
+ Ian Lance Taylor <ian@cygnus.com>.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "sysdep.h"
+#include "bfd.h"
+#include "bfdlink.h"
+#include "libbfd.h"
+#include "coff/internal.h"
+#include "coff/sym.h"
+#include "coff/symconst.h"
+#include "coff/ecoff.h"
+#include "coff/sw_64.h"
+#include "aout/ar.h"
+#include "libcoff.h"
+#include "libecoff.h"
+
+/* Prototypes for static functions. */
+
+/* ECOFF has COFF sections, but the debugging information is stored in
+ a completely different format. ECOFF targets use some of the
+ swapping routines from coffswap.h, and some of the generic COFF
+ routines in coffgen.c, but, unlike the real COFF targets, do not
+ use coffcode.h itself.
+
+ Get the generic COFF swapping routines, except for the reloc,
+ symbol, and lineno ones. Give them ecoff names. Define some
+ accessor macros for the large sizes used for SW_64 ECOFF. */
+
+#define GET_FILEHDR_SYMPTR H_GET_64
+#define PUT_FILEHDR_SYMPTR H_PUT_64
+#define GET_AOUTHDR_TSIZE H_GET_64
+#define PUT_AOUTHDR_TSIZE H_PUT_64
+#define GET_AOUTHDR_DSIZE H_GET_64
+#define PUT_AOUTHDR_DSIZE H_PUT_64
+#define GET_AOUTHDR_BSIZE H_GET_64
+#define PUT_AOUTHDR_BSIZE H_PUT_64
+#define GET_AOUTHDR_ENTRY H_GET_64
+#define PUT_AOUTHDR_ENTRY H_PUT_64
+#define GET_AOUTHDR_TEXT_START H_GET_64
+#define PUT_AOUTHDR_TEXT_START H_PUT_64
+#define GET_AOUTHDR_DATA_START H_GET_64
+#define PUT_AOUTHDR_DATA_START H_PUT_64
+#define GET_SCNHDR_PADDR H_GET_64
+#define PUT_SCNHDR_PADDR H_PUT_64
+#define GET_SCNHDR_VADDR H_GET_64
+#define PUT_SCNHDR_VADDR H_PUT_64
+#define GET_SCNHDR_SIZE H_GET_64
+#define PUT_SCNHDR_SIZE H_PUT_64
+#define GET_SCNHDR_SCNPTR H_GET_64
+#define PUT_SCNHDR_SCNPTR H_PUT_64
+#define GET_SCNHDR_RELPTR H_GET_64
+#define PUT_SCNHDR_RELPTR H_PUT_64
+#define GET_SCNHDR_LNNOPTR H_GET_64
+#define PUT_SCNHDR_LNNOPTR H_PUT_64
+
+#define SW_64ECOFF
+
+#define NO_COFF_RELOCS
+#define NO_COFF_SYMBOLS
+#define NO_COFF_LINENOS
+#define coff_swap_filehdr_in sw_64_ecoff_swap_filehdr_in
+#define coff_swap_filehdr_out sw_64_ecoff_swap_filehdr_out
+#define coff_swap_aouthdr_in sw_64_ecoff_swap_aouthdr_in
+#define coff_swap_aouthdr_out sw_64_ecoff_swap_aouthdr_out
+#define coff_swap_scnhdr_in sw_64_ecoff_swap_scnhdr_in
+#define coff_swap_scnhdr_out sw_64_ecoff_swap_scnhdr_out
+#include "coffswap.h"
+
+/* Get the ECOFF swapping routines. */
+#define ECOFF_64
+#include "ecoffswap.h"
+
+/* How to process the various reloc types. */
+
+static bfd_reloc_status_type
+reloc_nil (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc ATTRIBUTE_UNUSED,
+ asymbol *sym ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED,
+ asection *sec ATTRIBUTE_UNUSED, bfd *output_bfd ATTRIBUTE_UNUSED,
+ char **error_message ATTRIBUTE_UNUSED)
+{
+ return bfd_reloc_ok;
+}
+
+/* In case we're on a 32-bit machine, construct a 64-bit "-1" value
+ from smaller values. Start with zero, widen, *then* decrement. */
+#define MINUS_ONE (((bfd_vma) 0) - 1)
+
+static reloc_howto_type sw_64_howto_table[] = {
+ /* Reloc type 0 is ignored by itself. However, it appears after a
+ GPDISP reloc to identify the location where the low order 16 bits
+ of the gp register are loaded. */
+ HOWTO (SW_64_R_IGNORE, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ reloc_nil, /* special_function */
+ "IGNORE", /* name */
+ true, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* A 32 bit reference to a symbol. */
+ HOWTO (SW_64_R_REFLONG, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ 0, /* special_function */
+ "REFLONG", /* name */
+ true, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 64 bit reference to a symbol. */
+ HOWTO (SW_64_R_REFQUAD, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ 0, /* special_function */
+ "REFQUAD", /* name */
+ true, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 32 bit GP relative offset. This is just like REFLONG except
+ that when the value is used the value of the gp register will be
+ added in. */
+ HOWTO (SW_64_R_GPREL32, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ 0, /* special_function */
+ "GPREL32", /* name */
+ true, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Used for an instruction that refers to memory off the GP
+ register. The offset is 16 bits of the 32 bit instruction. This
+ reloc always seems to be against the .lita section. */
+ HOWTO (SW_64_R_LITERAL, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ 0, /* special_function */
+ "LITERAL", /* name */
+ true, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* This reloc only appears immediately following a LITERAL reloc.
+ It identifies a use of the literal. It seems that the linker can
+ use this to eliminate a portion of the .lita section. The symbol
+ index is special: 1 means the literal address is in the base
+ register of a memory format instruction; 2 means the literal
+ address is in the byte offset register of a byte-manipulation
+ instruction; 3 means the literal address is in the target
+ register of a jsr instruction. This does not actually do any
+ relocation. */
+ HOWTO (SW_64_R_LITUSE, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ reloc_nil, /* special_function */
+ "LITUSE", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Load the gp register. This is always used for a ldah instruction
+ which loads the upper 16 bits of the gp register. The next reloc
+ will be an IGNORE reloc which identifies the location of the lda
+ instruction which loads the lower 16 bits. The symbol index of
+ the GPDISP instruction appears to actually be the number of bytes
+ between the ldah and lda instructions. This gives two different
+ ways to determine where the lda instruction is; I don't know why
+ both are used. The value to use for the relocation is the
+ difference between the GP value and the current location; the
+ load will always be done against a register holding the current
+ address. */
+ HOWTO (SW_64_R_GPDISP, /* type */
+ 16, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ reloc_nil, /* special_function */
+ "GPDISP", /* name */
+ true, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* A 21 bit branch. The native assembler generates these for
+ branches within the text segment, and also fills in the PC
+ relative offset in the instruction. */
+ HOWTO (SW_64_R_BRADDR, /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 21, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ 0, /* special_function */
+ "BRADDR", /* name */
+ true, /* partial_inplace */
+ 0x1fffff, /* src_mask */
+ 0x1fffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 26 bit branch. The native assembler generates these for
+ branches within the text segment, and also fills in the PC
+ relative offset in the instruction. */
+ HOWTO (SW_64_R_BR26ADDR, /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 26, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ 0, /* special_function */
+ "BR26ADDR", /* name */
+ true, /* partial_inplace */
+ 0x3ffffff, /* src_mask */
+ 0x3ffffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A hint for a jump to a register. */
+ HOWTO (SW_64_R_HINT, /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 14, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0, /* special_function */
+ "HINT", /* name */
+ true, /* partial_inplace */
+ 0x3fff, /* src_mask */
+ 0x3fff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* 16 bit PC relative offset. */
+ HOWTO (SW_64_R_SREL16, /* type */
+ 0, /* rightshift */
+ 1, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ 0, /* special_function */
+ "SREL16", /* name */
+ true, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* 32 bit PC relative offset. */
+ HOWTO (SW_64_R_SREL32, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ 0, /* special_function */
+ "SREL32", /* name */
+ true, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 64 bit PC relative offset. */
+ HOWTO (SW_64_R_SREL64, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ 0, /* special_function */
+ "SREL64", /* name */
+ true, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Push a value on the reloc evaluation stack. */
+ HOWTO (SW_64_R_OP_PUSH, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0, /* special_function */
+ "OP_PUSH", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Store the value from the stack at the given address. Store it in
+ a bitfield of size r_size starting at bit position r_offset. */
+ HOWTO (SW_64_R_OP_STORE, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0, /* special_function */
+ "OP_STORE", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Subtract the reloc address from the value on the top of the
+ relocation stack. */
+ HOWTO (SW_64_R_OP_PSUB, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0, /* special_function */
+ "OP_PSUB", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Shift the value on the top of the relocation stack right by the
+ given value. */
+ HOWTO (SW_64_R_OP_PRSHIFT, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0, /* special_function */
+ "OP_PRSHIFT", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Adjust the GP value for a new range in the object file. */
+ HOWTO (SW_64_R_GPVALUE, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ 0, /* special_function */
+ "GPVALUE", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ false) /* pcrel_offset */
+};
+
+/* Recognize an SW_64 ECOFF file. */
+
+static bfd_cleanup
+sw_64_ecoff_object_p (bfd *abfd)
+{
+ bfd_cleanup ret;
+
+ ret = coff_object_p (abfd);
+
+ if (ret != NULL)
+ {
+ asection *sec;
+
+ /* SW_64 ECOFF has a .pdata section. The lnnoptr field of the
+ .pdata section is the number of entries it contains. Each
+ entry takes up 8 bytes. The number of entries is required
+ since the section is aligned to a 16 byte boundary. When we
+ link .pdata sections together, we do not want to include the
+ alignment bytes. We handle this on input by faking the size
+ of the .pdata section to remove the unwanted alignment bytes.
+ On output we will set the lnnoptr field and force the
+ alignment. */
+ sec = bfd_get_section_by_name (abfd, _PDATA);
+ if (sec != (asection *) NULL)
+ {
+ bfd_size_type size;
+
+ size = (bfd_size_type) sec->line_filepos * 8;
+ BFD_ASSERT (size == sec->size || size + 8 == sec->size);
+ if (!bfd_set_section_size (sec, size))
+ return NULL;
+ }
+ }
+
+ return ret;
+}
+
+/* See whether the magic number matches. */
+
+static bool
+sw_64_ecoff_bad_format_hook (bfd *abfd ATTRIBUTE_UNUSED, void *filehdr)
+{
+ struct internal_filehdr *internal_f = (struct internal_filehdr *) filehdr;
+
+ if (!SW_64_ECOFF_BADMAG (*internal_f))
+ return true;
+
+ if (SW_64_ECOFF_COMPRESSEDMAG (*internal_f))
+ _bfd_error_handler (
+ _ ("%pB: cannot handle compressed SW_64 binaries; "
+ "use compiler flags, or objZ, to generate uncompressed binaries"),
+ abfd);
+
+ return false;
+}
+
+/* This is a hook called by coff_real_object_p to create any backend
+ specific information. */
+
+static void *
+sw_64_ecoff_mkobject_hook (bfd *abfd, void *filehdr, void *aouthdr)
+{
+ void *ecoff;
+
+ ecoff = _bfd_ecoff_mkobject_hook (abfd, filehdr, aouthdr);
+
+ if (ecoff != NULL)
+ {
+ struct internal_filehdr *internal_f = (struct internal_filehdr *) filehdr;
+
+ /* Set additional BFD flags according to the object type from the
+ machine specific file header flags. */
+ switch (internal_f->f_flags & F_SW_64_OBJECT_TYPE_MASK)
+ {
+ case F_SW_64_SHARABLE:
+ abfd->flags |= DYNAMIC;
+ break;
+ case F_SW_64_CALL_SHARED:
+ /* Always executable if using shared libraries as the run time
+ loader might resolve undefined references. */
+ abfd->flags |= (DYNAMIC | EXEC_P);
+ break;
+ }
+ }
+ return ecoff;
+}
+
+/* Reloc handling. */
+
+/* Swap a reloc in. */
+
+static void
+sw_64_ecoff_swap_reloc_in (bfd *abfd, void *ext_ptr,
+ struct internal_reloc *intern)
+{
+ const RELOC *ext = (RELOC *) ext_ptr;
+
+ intern->r_vaddr = H_GET_64 (abfd, ext->r_vaddr);
+ intern->r_symndx = H_GET_32 (abfd, ext->r_symndx);
+
+ BFD_ASSERT (bfd_header_little_endian (abfd));
+
+ intern->r_type = ((ext->r_bits[0] & RELOC_BITS0_TYPE_LITTLE)
+ >> RELOC_BITS0_TYPE_SH_LITTLE);
+ intern->r_extern = (ext->r_bits[1] & RELOC_BITS1_EXTERN_LITTLE) != 0;
+ intern->r_offset = ((ext->r_bits[1] & RELOC_BITS1_OFFSET_LITTLE)
+ >> RELOC_BITS1_OFFSET_SH_LITTLE);
+ /* Ignore the reserved bits. */
+ intern->r_size = ((ext->r_bits[3] & RELOC_BITS3_SIZE_LITTLE)
+ >> RELOC_BITS3_SIZE_SH_LITTLE);
+
+ if (intern->r_type == SW_64_R_LITUSE || intern->r_type == SW_64_R_GPDISP)
+ {
+ /* Handle the LITUSE and GPDISP relocs specially. Its symndx
+ value is not actually a symbol index, but is instead a
+ special code. We put the code in the r_size field, and
+ clobber the symndx. */
+ if (intern->r_size != 0)
+ abort ();
+ intern->r_size = intern->r_symndx;
+ intern->r_symndx = RELOC_SECTION_NONE;
+ }
+ else if (intern->r_type == SW_64_R_IGNORE)
+ {
+ /* The IGNORE reloc generally follows a GPDISP reloc, and is
+ against the .lita section. The section is irrelevant. */
+ if (!intern->r_extern && intern->r_symndx == RELOC_SECTION_ABS)
+ abort ();
+ if (!intern->r_extern && intern->r_symndx == RELOC_SECTION_LITA)
+ intern->r_symndx = RELOC_SECTION_ABS;
+ }
+}
+
+/* Swap a reloc out. */
+
+static void
+sw_64_ecoff_swap_reloc_out (bfd *abfd, const struct internal_reloc *intern,
+ void *dst)
+{
+ RELOC *ext = (RELOC *) dst;
+ long symndx;
+ unsigned char size;
+
+ /* Undo the hackery done in swap_reloc_in. */
+ if (intern->r_type == SW_64_R_LITUSE || intern->r_type == SW_64_R_GPDISP)
+ {
+ symndx = intern->r_size;
+ size = 0;
+ }
+ else if (intern->r_type == SW_64_R_IGNORE && !intern->r_extern
+ && intern->r_symndx == RELOC_SECTION_ABS)
+ {
+ symndx = RELOC_SECTION_LITA;
+ size = intern->r_size;
+ }
+ else
+ {
+ symndx = intern->r_symndx;
+ size = intern->r_size;
+ }
+
+ /* XXX FIXME: The maximum symndx value used to be 14 but this
+ fails with object files produced by DEC's C++ compiler.
+ Where does the value 14 (or 15) come from anyway? */
+ BFD_ASSERT (intern->r_extern
+ || (intern->r_symndx >= 0 && intern->r_symndx <= 15));
+
+ H_PUT_64 (abfd, intern->r_vaddr, ext->r_vaddr);
+ H_PUT_32 (abfd, symndx, ext->r_symndx);
+
+ BFD_ASSERT (bfd_header_little_endian (abfd));
+
+ ext->r_bits[0] = ((intern->r_type << RELOC_BITS0_TYPE_SH_LITTLE)
+ & RELOC_BITS0_TYPE_LITTLE);
+ ext->r_bits[1] = ((intern->r_extern ? RELOC_BITS1_EXTERN_LITTLE : 0)
+ | ((intern->r_offset << RELOC_BITS1_OFFSET_SH_LITTLE)
+ & RELOC_BITS1_OFFSET_LITTLE));
+ ext->r_bits[2] = 0;
+ ext->r_bits[3]
+ = ((size << RELOC_BITS3_SIZE_SH_LITTLE) & RELOC_BITS3_SIZE_LITTLE);
+}
+
+/* Finish canonicalizing a reloc. Part of this is generic to all
+ ECOFF targets, and that part is in ecoff.c. The rest is done in
+ this backend routine. It must fill in the howto field. */
+
+static void
+sw_64_adjust_reloc_in (bfd *abfd, const struct internal_reloc *intern,
+ arelent *rptr)
+{
+ if (intern->r_type > SW_64_R_GPVALUE)
+ {
+ /* xgettext:c-format */
+ _bfd_error_handler (_ ("%pB: unsupported relocation type %#x"), abfd,
+ intern->r_type);
+ bfd_set_error (bfd_error_bad_value);
+ rptr->addend = 0;
+ rptr->howto = NULL;
+ return;
+ }
+
+ switch (intern->r_type)
+ {
+ case SW_64_R_BRADDR:
+ case SW_64_R_BR26ADDR:
+ case SW_64_R_SREL16:
+ case SW_64_R_SREL32:
+ case SW_64_R_SREL64:
+ /* These relocs appear to be fully resolved when they are against
+ internal symbols. Against external symbols, BRADDR at least
+ appears to be resolved against the next instruction. */
+ if (!intern->r_extern)
+ rptr->addend = 0;
+ else
+ rptr->addend = -(intern->r_vaddr + 4);
+ break;
+
+ case SW_64_R_GPREL32:
+ case SW_64_R_LITERAL:
+ /* Copy the gp value for this object file into the addend, to
+ ensure that we are not confused by the linker. */
+ if (!intern->r_extern)
+ rptr->addend += ecoff_data (abfd)->gp;
+ break;
+
+ case SW_64_R_LITUSE:
+ case SW_64_R_GPDISP:
+ /* The LITUSE and GPDISP relocs do not use a symbol, or an
+ addend, but they do use a special code. Put this code in the
+ addend field. */
+ rptr->addend = intern->r_size;
+ break;
+
+ case SW_64_R_OP_STORE:
+ /* The STORE reloc needs the size and offset fields. We store
+ them in the addend. */
+ BFD_ASSERT (intern->r_offset <= 256);
+ rptr->addend = (intern->r_offset << 8) + intern->r_size;
+ break;
+
+ case SW_64_R_OP_PUSH:
+ case SW_64_R_OP_PSUB:
+ case SW_64_R_OP_PRSHIFT:
+ /* The PUSH, PSUB and PRSHIFT relocs do not actually use an
+ address. I believe that the address supplied is really an
+ addend. */
+ rptr->addend = intern->r_vaddr;
+ break;
+
+ case SW_64_R_GPVALUE:
+ /* Set the addend field to the new GP value. */
+ rptr->addend = intern->r_symndx + ecoff_data (abfd)->gp;
+ break;
+
+ case SW_64_R_IGNORE:
+ /* If the type is SW_64_R_IGNORE, make sure this is a reference
+ to the absolute section so that the reloc is ignored. For
+ some reason the address of this reloc type is not adjusted by
+ the section vma. We record the gp value for this object file
+ here, for convenience when doing the GPDISP relocation. */
+ rptr->sym_ptr_ptr = bfd_abs_section_ptr->symbol_ptr_ptr;
+ rptr->address = intern->r_vaddr;
+ rptr->addend = ecoff_data (abfd)->gp;
+ break;
+
+ default:
+ break;
+ }
+
+ rptr->howto = &sw_64_howto_table[intern->r_type];
+}
+
+/* When writing out a reloc we need to pull some values back out of
+ the addend field into the reloc. This is roughly the reverse of
+ sw_64_adjust_reloc_in, except that there are several changes we do
+ not need to undo. */
+
+static void
+sw_64_adjust_reloc_out (bfd *abfd ATTRIBUTE_UNUSED, const arelent *rel,
+ struct internal_reloc *intern)
+{
+ switch (intern->r_type)
+ {
+ case SW_64_R_LITUSE:
+ case SW_64_R_GPDISP:
+ intern->r_size = rel->addend;
+ break;
+
+ case SW_64_R_OP_STORE:
+ intern->r_size = rel->addend & 0xff;
+ intern->r_offset = (rel->addend >> 8) & 0xff;
+ break;
+
+ case SW_64_R_OP_PUSH:
+ case SW_64_R_OP_PSUB:
+ case SW_64_R_OP_PRSHIFT:
+ intern->r_vaddr = rel->addend;
+ break;
+
+ case SW_64_R_IGNORE:
+ intern->r_vaddr = rel->address;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* The size of the stack for the relocation evaluator. */
+#define RELOC_STACKSIZE (10)
+
+/* SW_64 ECOFF relocs have a built in expression evaluator as well as
+ other interdependencies. Rather than use a bunch of special
+ functions and global variables, we use a single routine to do all
+ the relocation for a section. I haven't yet worked out how the
+ assembler is going to handle this. */
+
+static bfd_byte *
+sw_64_ecoff_get_relocated_section_contents (bfd *abfd,
+ struct bfd_link_info *link_info,
+ struct bfd_link_order *link_order,
+ bfd_byte *data, bool relocatable,
+ asymbol **symbols)
+{
+ bfd *input_bfd = link_order->u.indirect.section->owner;
+ asection *input_section = link_order->u.indirect.section;
+ long reloc_size = bfd_get_reloc_upper_bound (input_bfd, input_section);
+ arelent **reloc_vector = NULL;
+ long reloc_count;
+ bfd *output_bfd = relocatable ? abfd : (bfd *) NULL;
+ bfd_vma gp;
+ bfd_size_type sz;
+ bool gp_undefined;
+ bfd_vma stack[RELOC_STACKSIZE];
+ int tos = 0;
+
+ if (reloc_size < 0)
+ goto error_return;
+ reloc_vector = (arelent **) bfd_malloc ((bfd_size_type) reloc_size);
+ if (reloc_vector == NULL && reloc_size != 0)
+ goto error_return;
+
+ sz = input_section->rawsize ? input_section->rawsize : input_section->size;
+ if (!bfd_get_section_contents (input_bfd, input_section, data, 0, sz))
+ goto error_return;
+
+ reloc_count
+ = bfd_canonicalize_reloc (input_bfd, input_section, reloc_vector, symbols);
+ if (reloc_count < 0)
+ goto error_return;
+ if (reloc_count == 0)
+ goto successful_return;
+
+ /* Get the GP value for the output BFD. */
+ gp_undefined = false;
+ gp = _bfd_get_gp_value (abfd);
+ if (gp == 0)
+ {
+ if (relocatable)
+ {
+ asection *sec;
+ bfd_vma lo;
+
+ /* Make up a value. */
+ lo = (bfd_vma) -1;
+ for (sec = abfd->sections; sec != NULL; sec = sec->next)
+ {
+ if (sec->vma < lo
+ && (strcmp (sec->name, ".sbss") == 0
+ || strcmp (sec->name, ".sdata") == 0
+ || strcmp (sec->name, ".lit4") == 0
+ || strcmp (sec->name, ".lit8") == 0
+ || strcmp (sec->name, ".lita") == 0))
+ lo = sec->vma;
+ }
+ gp = lo + 0x8000;
+ _bfd_set_gp_value (abfd, gp);
+ }
+ else
+ {
+ struct bfd_link_hash_entry *h;
+
+ h = bfd_link_hash_lookup (link_info->hash, "_gp", false, false, true);
+ if (h == (struct bfd_link_hash_entry *) NULL
+ || h->type != bfd_link_hash_defined)
+ gp_undefined = true;
+ else
+ {
+ gp = (h->u.def.value + h->u.def.section->output_section->vma
+ + h->u.def.section->output_offset);
+ _bfd_set_gp_value (abfd, gp);
+ }
+ }
+ }
+
+ for (; *reloc_vector != (arelent *) NULL; reloc_vector++)
+ {
+ arelent *rel;
+ bfd_reloc_status_type r;
+ char *err;
+
+ rel = *reloc_vector;
+ r = bfd_reloc_ok;
+ switch (rel->howto->type)
+ {
+ case SW_64_R_IGNORE:
+ rel->address += input_section->output_offset;
+ break;
+
+ case SW_64_R_REFLONG:
+ case SW_64_R_REFQUAD:
+ case SW_64_R_BRADDR:
+ case SW_64_R_BR26ADDR:
+ case SW_64_R_HINT:
+ case SW_64_R_SREL16:
+ case SW_64_R_SREL32:
+ case SW_64_R_SREL64:
+ if (relocatable
+ && ((*rel->sym_ptr_ptr)->flags & BSF_SECTION_SYM) == 0)
+ {
+ rel->address += input_section->output_offset;
+ break;
+ }
+ r = bfd_perform_relocation (input_bfd, rel, data, input_section,
+ output_bfd, &err);
+ break;
+
+ case SW_64_R_GPREL32:
+ /* This relocation is used in a switch table. It is a 32
+ bit offset from the current GP value. We must adjust it
+ by the difference between the original GP value and the
+ current GP value. The original GP value is stored in the
+ addend. We adjust the addend and let
+ bfd_perform_relocation finish the job. */
+ rel->addend -= gp;
+ r = bfd_perform_relocation (input_bfd, rel, data, input_section,
+ output_bfd, &err);
+ if (r == bfd_reloc_ok && gp_undefined)
+ {
+ r = bfd_reloc_dangerous;
+ err = (char *) _ (
+ "GP relative relocation used when GP not defined");
+ }
+ break;
+
+ case SW_64_R_LITERAL:
+ /* This is a reference to a literal value, generally
+ (always?) in the .lita section. This is a 16 bit GP
+ relative relocation. Sometimes the subsequent reloc is a
+ LITUSE reloc, which indicates how this reloc is used.
+ This sometimes permits rewriting the two instructions
+ referred to by the LITERAL and the LITUSE into different
+ instructions which do not refer to .lita. This can save
+ a memory reference, and permits removing a value from
+ .lita thus saving GP relative space.
+
+ We do not do these optimizations. To do them we would need
+ to arrange to link the .lita section first, so that by
+ the time we got here we would know the final values to
+ use. This would not be particularly difficult, but it is
+ not currently implemented. */
+
+ {
+ unsigned long insn;
+
+ /* I believe that the LITERAL reloc will only apply to a
+ ldq or ldl instruction, so check my assumption. */
+ insn = bfd_get_32 (input_bfd, data + rel->address);
+ BFD_ASSERT (((insn >> 26) & 0x3f) == 0x29
+ || ((insn >> 26) & 0x3f) == 0x28);
+
+ rel->addend -= gp;
+ r = bfd_perform_relocation (input_bfd, rel, data, input_section,
+ output_bfd, &err);
+ if (r == bfd_reloc_ok && gp_undefined)
+ {
+ r = bfd_reloc_dangerous;
+ err = (char *) _ (
+ "GP relative relocation used when GP not defined");
+ }
+ }
+ break;
+
+ case SW_64_R_LITUSE:
+ /* See SW_64_R_LITERAL above for the uses of this reloc. It
+ does not cause anything to happen, itself. */
+ rel->address += input_section->output_offset;
+ break;
+
+ case SW_64_R_GPDISP:
+ /* This marks the ldah of an ldah/lda pair which loads the
+ gp register with the difference of the gp value and the
+ current location. The second of the pair is r_size bytes
+ ahead; it used to be marked with an SW_64_R_IGNORE reloc,
+ but that no longer happens in OSF/1 3.2. */
+ {
+ unsigned long insn1, insn2;
+ bfd_vma addend;
+
+ /* Get the two instructions. */
+ insn1 = bfd_get_32 (input_bfd, data + rel->address);
+ insn2 = bfd_get_32 (input_bfd, data + rel->address + rel->addend);
+
+ BFD_ASSERT (((insn1 >> 26) & 0x3f) == 0x09); /* ldah */
+ BFD_ASSERT (((insn2 >> 26) & 0x3f) == 0x08); /* lda */
+
+ /* Get the existing addend. We must account for the sign
+ extension done by lda and ldah. */
+ addend = ((insn1 & 0xffff) << 16) + (insn2 & 0xffff);
+ if (insn1 & 0x8000)
+ {
+ addend -= 0x80000000;
+ addend -= 0x80000000;
+ }
+ if (insn2 & 0x8000)
+ addend -= 0x10000;
+
+ /* The existing addend includes the difference between the
+ gp of the input BFD and the address in the input BFD.
+ Subtract this out. */
+ addend -= (ecoff_data (input_bfd)->gp
+ - (input_section->vma + rel->address));
+
+ /* Now add in the final gp value, and subtract out the
+ final address. */
+ addend += (gp
+ - (input_section->output_section->vma
+ + input_section->output_offset + rel->address));
+
+ /* Change the instructions, accounting for the sign
+ extension, and write them out. */
+ if (addend & 0x8000)
+ addend += 0x10000;
+ insn1 = (insn1 & 0xffff0000) | ((addend >> 16) & 0xffff);
+ insn2 = (insn2 & 0xffff0000) | (addend & 0xffff);
+
+ bfd_put_32 (input_bfd, (bfd_vma) insn1, data + rel->address);
+ bfd_put_32 (input_bfd, (bfd_vma) insn2,
+ data + rel->address + rel->addend);
+
+ rel->address += input_section->output_offset;
+ }
+ break;
+
+ case SW_64_R_OP_PUSH:
+ /* Push a value on the reloc evaluation stack. */
+ {
+ asymbol *symbol;
+ bfd_vma relocation;
+
+ if (relocatable)
+ {
+ rel->address += input_section->output_offset;
+ break;
+ }
+
+ /* Figure out the relocation of this symbol. */
+ symbol = *rel->sym_ptr_ptr;
+
+ if (bfd_is_und_section (symbol->section))
+ r = bfd_reloc_undefined;
+
+ if (bfd_is_com_section (symbol->section))
+ relocation = 0;
+ else
+ relocation = symbol->value;
+ relocation += symbol->section->output_section->vma;
+ relocation += symbol->section->output_offset;
+ relocation += rel->addend;
+
+ if (tos >= RELOC_STACKSIZE)
+ abort ();
+
+ stack[tos++] = relocation;
+ }
+ break;
+
+ case SW_64_R_OP_STORE:
+ /* Store a value from the reloc stack into a bitfield. */
+ {
+ bfd_vma val;
+ int offset, size;
+
+ if (relocatable)
+ {
+ rel->address += input_section->output_offset;
+ break;
+ }
+
+ if (tos == 0)
+ abort ();
+
+ /* The offset and size for this reloc are encoded into the
+ addend field by sw_64_adjust_reloc_in. */
+ offset = (rel->addend >> 8) & 0xff;
+ size = rel->addend & 0xff;
+
+ val = bfd_get_64 (abfd, data + rel->address);
+ val &= ~(((1 << size) - 1) << offset);
+ val |= (stack[--tos] & ((1 << size) - 1)) << offset;
+ bfd_put_64 (abfd, val, data + rel->address);
+ }
+ break;
+
+ case SW_64_R_OP_PSUB:
+ /* Subtract a value from the top of the stack. */
+ {
+ asymbol *symbol;
+ bfd_vma relocation;
+
+ if (relocatable)
+ {
+ rel->address += input_section->output_offset;
+ break;
+ }
+
+ /* Figure out the relocation of this symbol. */
+ symbol = *rel->sym_ptr_ptr;
+
+ if (bfd_is_und_section (symbol->section))
+ r = bfd_reloc_undefined;
+
+ if (bfd_is_com_section (symbol->section))
+ relocation = 0;
+ else
+ relocation = symbol->value;
+ relocation += symbol->section->output_section->vma;
+ relocation += symbol->section->output_offset;
+ relocation += rel->addend;
+
+ if (tos == 0)
+ abort ();
+
+ stack[tos - 1] -= relocation;
+ }
+ break;
+
+ case SW_64_R_OP_PRSHIFT:
+ /* Shift the value on the top of the stack. */
+ {
+ asymbol *symbol;
+ bfd_vma relocation;
+
+ if (relocatable)
+ {
+ rel->address += input_section->output_offset;
+ break;
+ }
+
+ /* Figure out the relocation of this symbol. */
+ symbol = *rel->sym_ptr_ptr;
+
+ if (bfd_is_und_section (symbol->section))
+ r = bfd_reloc_undefined;
+
+ if (bfd_is_com_section (symbol->section))
+ relocation = 0;
+ else
+ relocation = symbol->value;
+ relocation += symbol->section->output_section->vma;
+ relocation += symbol->section->output_offset;
+ relocation += rel->addend;
+
+ if (tos == 0)
+ abort ();
+
+ stack[tos - 1] >>= relocation;
+ }
+ break;
+
+ case SW_64_R_GPVALUE:
+ /* I really don't know if this does the right thing. */
+ gp = rel->addend;
+ gp_undefined = false;
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (relocatable)
+ {
+ asection *os = input_section->output_section;
+
+ /* A partial link, so keep the relocs. */
+ os->orelocation[os->reloc_count] = rel;
+ os->reloc_count++;
+ }
+
+ if (r != bfd_reloc_ok)
+ {
+ switch (r)
+ {
+ case bfd_reloc_undefined:
+ (*link_info->callbacks->undefined_symbol) (
+ link_info, bfd_asymbol_name (*rel->sym_ptr_ptr), input_bfd,
+ input_section, rel->address, true);
+ break;
+ case bfd_reloc_dangerous:
+ (*link_info->callbacks->reloc_dangerous) (link_info, err,
+ input_bfd,
+ input_section,
+ rel->address);
+ break;
+ case bfd_reloc_overflow:
+ (*link_info->callbacks->reloc_overflow) (
+ link_info, NULL, bfd_asymbol_name (*rel->sym_ptr_ptr),
+ rel->howto->name, rel->addend, input_bfd, input_section,
+ rel->address);
+ break;
+ case bfd_reloc_outofrange:
+ default:
+ abort ();
+ break;
+ }
+ }
+ }
+
+ if (tos != 0)
+ abort ();
+
+successful_return:
+ free (reloc_vector);
+ return data;
+
+error_return:
+ free (reloc_vector);
+ return NULL;
+}
+
+/* Get the howto structure for a generic reloc type. */
+
+static reloc_howto_type *
+sw_64_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ bfd_reloc_code_real_type code)
+{
+ int sw_64_type;
+
+ switch (code)
+ {
+ case BFD_RELOC_32:
+ sw_64_type = SW_64_R_REFLONG;
+ break;
+ case BFD_RELOC_64:
+ case BFD_RELOC_CTOR:
+ sw_64_type = SW_64_R_REFQUAD;
+ break;
+ case BFD_RELOC_GPREL32:
+ sw_64_type = SW_64_R_GPREL32;
+ break;
+ case BFD_RELOC_SW_64_LITERAL:
+ sw_64_type = SW_64_R_LITERAL;
+ break;
+ case BFD_RELOC_SW_64_LITUSE:
+ sw_64_type = SW_64_R_LITUSE;
+ break;
+ case BFD_RELOC_SW_64_GPDISP_HI16:
+ sw_64_type = SW_64_R_GPDISP;
+ break;
+ case BFD_RELOC_SW_64_GPDISP_LO16:
+ sw_64_type = SW_64_R_IGNORE;
+ break;
+ case BFD_RELOC_23_PCREL_S2:
+ sw_64_type = SW_64_R_BRADDR;
+ break;
+ case BFD_RELOC_SW_64_BR26:
+ sw_64_type = SW_64_R_BR26ADDR;
+ break;
+ case BFD_RELOC_SW_64_HINT:
+ sw_64_type = SW_64_R_HINT;
+ break;
+ case BFD_RELOC_16_PCREL:
+ sw_64_type = SW_64_R_SREL16;
+ break;
+ case BFD_RELOC_32_PCREL:
+ sw_64_type = SW_64_R_SREL32;
+ break;
+ case BFD_RELOC_64_PCREL:
+ sw_64_type = SW_64_R_SREL64;
+ break;
+ default:
+ return (reloc_howto_type *) NULL;
+ }
+
+ return &sw_64_howto_table[sw_64_type];
+}
+
+static reloc_howto_type *
+sw_64_bfd_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, const char *r_name)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof (sw_64_howto_table) / sizeof (sw_64_howto_table[0]);
+ i++)
+ if (sw_64_howto_table[i].name != NULL
+ && strcasecmp (sw_64_howto_table[i].name, r_name) == 0)
+ return &sw_64_howto_table[i];
+
+ return NULL;
+}
+
+/* A helper routine for sw_64_relocate_section which converts an
+ external reloc when generating relocatable output. Returns the
+ relocation amount. */
+
+static bfd_vma
+sw_64_convert_external_reloc (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info, bfd *input_bfd,
+ struct external_reloc *ext_rel,
+ struct ecoff_link_hash_entry *h)
+{
+ unsigned long r_symndx;
+ bfd_vma relocation;
+
+ BFD_ASSERT (bfd_link_relocatable (info));
+
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ {
+ asection *hsec;
+ const char *name;
+
+ /* This symbol is defined in the output. Convert the reloc from
+ being against the symbol to being against the section. */
+
+ /* Clear the r_extern bit. */
+ ext_rel->r_bits[1] &= ~RELOC_BITS1_EXTERN_LITTLE;
+
+ /* Compute a new r_symndx value. */
+ hsec = h->root.u.def.section;
+ name = bfd_section_name (hsec->output_section);
+
+ r_symndx = (unsigned long) -1;
+ switch (name[1])
+ {
+ case 'A':
+ if (strcmp (name, "*ABS*") == 0)
+ r_symndx = RELOC_SECTION_ABS;
+ break;
+ case 'b':
+ if (strcmp (name, ".bss") == 0)
+ r_symndx = RELOC_SECTION_BSS;
+ break;
+ case 'd':
+ if (strcmp (name, ".data") == 0)
+ r_symndx = RELOC_SECTION_DATA;
+ break;
+ case 'f':
+ if (strcmp (name, ".fini") == 0)
+ r_symndx = RELOC_SECTION_FINI;
+ break;
+ case 'i':
+ if (strcmp (name, ".init") == 0)
+ r_symndx = RELOC_SECTION_INIT;
+ break;
+ case 'l':
+ if (strcmp (name, ".lita") == 0)
+ r_symndx = RELOC_SECTION_LITA;
+ else if (strcmp (name, ".lit8") == 0)
+ r_symndx = RELOC_SECTION_LIT8;
+ else if (strcmp (name, ".lit4") == 0)
+ r_symndx = RELOC_SECTION_LIT4;
+ break;
+ case 'p':
+ if (strcmp (name, ".pdata") == 0)
+ r_symndx = RELOC_SECTION_PDATA;
+ break;
+ case 'r':
+ if (strcmp (name, ".rdata") == 0)
+ r_symndx = RELOC_SECTION_RDATA;
+ else if (strcmp (name, ".rconst") == 0)
+ r_symndx = RELOC_SECTION_RCONST;
+ break;
+ case 's':
+ if (strcmp (name, ".sdata") == 0)
+ r_symndx = RELOC_SECTION_SDATA;
+ else if (strcmp (name, ".sbss") == 0)
+ r_symndx = RELOC_SECTION_SBSS;
+ break;
+ case 't':
+ if (strcmp (name, ".text") == 0)
+ r_symndx = RELOC_SECTION_TEXT;
+ break;
+ case 'x':
+ if (strcmp (name, ".xdata") == 0)
+ r_symndx = RELOC_SECTION_XDATA;
+ break;
+ }
+
+ if (r_symndx == (unsigned long) -1)
+ abort ();
+
+ /* Add the section VMA and the symbol value. */
+ relocation = (h->root.u.def.value + hsec->output_section->vma
+ + hsec->output_offset);
+ }
+ else
+ {
+ /* Change the symndx value to the right one for
+ the output BFD. */
+ r_symndx = h->indx;
+ if (r_symndx == (unsigned long) -1)
+ {
+ /* Caller must give an error. */
+ r_symndx = 0;
+ }
+ relocation = 0;
+ }
+
+ /* Write out the new r_symndx value. */
+ H_PUT_32 (input_bfd, r_symndx, ext_rel->r_symndx);
+
+ return relocation;
+}
+
+/* Relocate a section while linking an SW_64 ECOFF file. This is
+ quite similar to get_relocated_section_contents. Perhaps they
+ could be combined somehow. */
+
+static bool
+sw_64_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
+ bfd *input_bfd, asection *input_section,
+ bfd_byte *contents, void *external_relocs)
+{
+ asection **symndx_to_section, *lita_sec;
+ struct ecoff_link_hash_entry **sym_hashes;
+ bfd_vma gp;
+ bool gp_undefined;
+ bfd_vma stack[RELOC_STACKSIZE];
+ int tos = 0;
+ struct external_reloc *ext_rel;
+ struct external_reloc *ext_rel_end;
+ bfd_size_type amt;
+
+ /* We keep a table mapping the symndx found in an internal reloc to
+ the appropriate section. This is faster than looking up the
+ section by name each time. */
+ symndx_to_section = ecoff_data (input_bfd)->symndx_to_section;
+ if (symndx_to_section == (asection **) NULL)
+ {
+ amt = NUM_RELOC_SECTIONS * sizeof (asection *);
+ symndx_to_section = (asection **) bfd_alloc (input_bfd, amt);
+ if (!symndx_to_section)
+ return false;
+
+ symndx_to_section[RELOC_SECTION_NONE] = NULL;
+ symndx_to_section[RELOC_SECTION_TEXT]
+ = bfd_get_section_by_name (input_bfd, ".text");
+ symndx_to_section[RELOC_SECTION_RDATA]
+ = bfd_get_section_by_name (input_bfd, ".rdata");
+ symndx_to_section[RELOC_SECTION_DATA]
+ = bfd_get_section_by_name (input_bfd, ".data");
+ symndx_to_section[RELOC_SECTION_SDATA]
+ = bfd_get_section_by_name (input_bfd, ".sdata");
+ symndx_to_section[RELOC_SECTION_SBSS]
+ = bfd_get_section_by_name (input_bfd, ".sbss");
+ symndx_to_section[RELOC_SECTION_BSS]
+ = bfd_get_section_by_name (input_bfd, ".bss");
+ symndx_to_section[RELOC_SECTION_INIT]
+ = bfd_get_section_by_name (input_bfd, ".init");
+ symndx_to_section[RELOC_SECTION_LIT8]
+ = bfd_get_section_by_name (input_bfd, ".lit8");
+ symndx_to_section[RELOC_SECTION_LIT4]
+ = bfd_get_section_by_name (input_bfd, ".lit4");
+ symndx_to_section[RELOC_SECTION_XDATA]
+ = bfd_get_section_by_name (input_bfd, ".xdata");
+ symndx_to_section[RELOC_SECTION_PDATA]
+ = bfd_get_section_by_name (input_bfd, ".pdata");
+ symndx_to_section[RELOC_SECTION_FINI]
+ = bfd_get_section_by_name (input_bfd, ".fini");
+ symndx_to_section[RELOC_SECTION_LITA]
+ = bfd_get_section_by_name (input_bfd, ".lita");
+ symndx_to_section[RELOC_SECTION_ABS] = bfd_abs_section_ptr;
+ symndx_to_section[RELOC_SECTION_RCONST]
+ = bfd_get_section_by_name (input_bfd, ".rconst");
+
+ ecoff_data (input_bfd)->symndx_to_section = symndx_to_section;
+ }
+
+ sym_hashes = ecoff_data (input_bfd)->sym_hashes;
+
+ /* On the SW_64, the .lita section must be addressable by the global
+ pointer. To support large programs, we need to allow multiple
+ global pointers. This works as long as each input .lita section
+ is less than 64KB. This implies that when producing relocatable
+ output, the .lita section is limited to 64KB. */
+
+ lita_sec = symndx_to_section[RELOC_SECTION_LITA];
+ gp = _bfd_get_gp_value (output_bfd);
+ if (!bfd_link_relocatable (info) && lita_sec != NULL)
+ {
+ struct ecoff_section_tdata *lita_sec_data;
+
+ /* Make sure we have a section data structure to which we can
+ hang on to the gp value we pick for the section. */
+ lita_sec_data = ecoff_section_data (input_bfd, lita_sec);
+ if (lita_sec_data == NULL)
+ {
+ amt = sizeof (struct ecoff_section_tdata);
+ lita_sec_data
+ = ((struct ecoff_section_tdata *) bfd_zalloc (input_bfd, amt));
+ lita_sec->used_by_bfd = lita_sec_data;
+ }
+
+ if (lita_sec_data->gp != 0)
+ {
+ /* If we already assigned a gp to this section, we better
+ stick with that value. */
+ gp = lita_sec_data->gp;
+ }
+ else
+ {
+ bfd_vma lita_vma;
+ bfd_size_type lita_size;
+
+ lita_vma = lita_sec->output_offset + lita_sec->output_section->vma;
+ lita_size = lita_sec->size;
+
+ if (gp == 0 || lita_vma < gp - 0x8000
+ || lita_vma + lita_size >= gp + 0x8000)
+ {
+ /* Either gp hasn't been set at all or the current gp
+ cannot address this .lita section. In both cases we
+ reset the gp to point into the "middle" of the
+ current input .lita section. */
+ if (gp && !ecoff_data (output_bfd)->issued_multiple_gp_warning)
+ {
+ (*info->callbacks->warning) (info,
+ _ ("using multiple gp values"),
+ (char *) NULL, output_bfd,
+ (asection *) NULL, (bfd_vma) 0);
+ ecoff_data (output_bfd)->issued_multiple_gp_warning = true;
+ }
+ if (lita_vma < gp - 0x8000)
+ gp = lita_vma + lita_size - 0x8000;
+ else
+ gp = lita_vma + 0x8000;
+ }
+
+ lita_sec_data->gp = gp;
+ }
+
+ _bfd_set_gp_value (output_bfd, gp);
+ }
+
+ gp_undefined = (gp == 0);
+
+ BFD_ASSERT (bfd_header_little_endian (output_bfd));
+ BFD_ASSERT (bfd_header_little_endian (input_bfd));
+
+ ext_rel = (struct external_reloc *) external_relocs;
+ ext_rel_end = ext_rel + input_section->reloc_count;
+ for (; ext_rel < ext_rel_end; ext_rel++)
+ {
+ bfd_vma r_vaddr;
+ unsigned long r_symndx;
+ int r_type;
+ int r_extern;
+ int r_offset;
+ int r_size;
+ bool relocatep;
+ bool adjust_addrp;
+ bool gp_usedp;
+ bfd_vma addend;
+
+ r_vaddr = H_GET_64 (input_bfd, ext_rel->r_vaddr);
+ r_symndx = H_GET_32 (input_bfd, ext_rel->r_symndx);
+
+ r_type = ((ext_rel->r_bits[0] & RELOC_BITS0_TYPE_LITTLE)
+ >> RELOC_BITS0_TYPE_SH_LITTLE);
+ r_extern = (ext_rel->r_bits[1] & RELOC_BITS1_EXTERN_LITTLE) != 0;
+ r_offset = ((ext_rel->r_bits[1] & RELOC_BITS1_OFFSET_LITTLE)
+ >> RELOC_BITS1_OFFSET_SH_LITTLE);
+ /* Ignore the reserved bits. */
+ r_size = ((ext_rel->r_bits[3] & RELOC_BITS3_SIZE_LITTLE)
+ >> RELOC_BITS3_SIZE_SH_LITTLE);
+
+ relocatep = false;
+ adjust_addrp = true;
+ gp_usedp = false;
+ addend = 0;
+
+ switch (r_type)
+ {
+ case SW_64_R_GPRELHIGH:
+ _bfd_error_handler (_ ("%pB: %s unsupported"), input_bfd,
+ "SW_64_R_GPRELHIGH");
+ bfd_set_error (bfd_error_bad_value);
+ continue;
+
+ case SW_64_R_GPRELLOW:
+ _bfd_error_handler (_ ("%pB: %s unsupported"), input_bfd,
+ "SW_64_R_GPRELLOW");
+ bfd_set_error (bfd_error_bad_value);
+ continue;
+
+ default:
+ /* xgettext:c-format */
+ _bfd_error_handler (_ ("%pB: unsupported relocation type %#x"),
+ input_bfd, (int) r_type);
+ bfd_set_error (bfd_error_bad_value);
+ continue;
+
+ case SW_64_R_IGNORE:
+ /* This reloc appears after a GPDISP reloc. On earlier
+ versions of OSF/1, it marked the position of the second
+ instruction to be altered by the GPDISP reloc, but it is
+ not otherwise used for anything. For some reason, the
+ address of the relocation does not appear to include the
+ section VMA, unlike the other relocation types. */
+ if (bfd_link_relocatable (info))
+ H_PUT_64 (input_bfd, input_section->output_offset + r_vaddr,
+ ext_rel->r_vaddr);
+ adjust_addrp = false;
+ break;
+
+ case SW_64_R_REFLONG:
+ case SW_64_R_REFQUAD:
+ case SW_64_R_HINT:
+ relocatep = true;
+ break;
+
+ case SW_64_R_BRADDR:
+ case SW_64_R_BR26ADDR:
+ case SW_64_R_SREL16:
+ case SW_64_R_SREL32:
+ case SW_64_R_SREL64:
+ if (r_extern)
+ addend += -(r_vaddr + 4);
+ relocatep = true;
+ break;
+
+ case SW_64_R_GPREL32:
+ /* This relocation is used in a switch table. It is a 32
+ bit offset from the current GP value. We must adjust it
+ by the difference between the original GP value and the
+ current GP value. */
+ relocatep = true;
+ addend = ecoff_data (input_bfd)->gp - gp;
+ gp_usedp = true;
+ break;
+
+ case SW_64_R_LITERAL:
+ /* This is a reference to a literal value, generally
+ (always?) in the .lita section. This is a 16 bit GP
+ relative relocation. Sometimes the subsequent reloc is a
+ LITUSE reloc, which indicates how this reloc is used.
+ This sometimes permits rewriting the two instructions
+ referred to by the LITERAL and the LITUSE into different
+ instructions which do not refer to .lita. This can save
+ a memory reference, and permits removing a value from
+ .lita thus saving GP relative space.
+
+ We do not do these optimizations. To do them we would need
+ to arrange to link the .lita section first, so that by
+ the time we got here we would know the final values to
+ use. This would not be particularly difficult, but it is
+ not currently implemented. */
+
+ /* I believe that the LITERAL reloc will only apply to a ldq
+ or ldl instruction, so check my assumption. */
+ {
+ unsigned long insn;
+
+ insn
+ = bfd_get_32 (input_bfd, contents + r_vaddr - input_section->vma);
+ BFD_ASSERT (((insn >> 26) & 0x3f) == 0x29
+ || ((insn >> 26) & 0x3f) == 0x28);
+ }
+
+ relocatep = true;
+ addend = ecoff_data (input_bfd)->gp - gp;
+ gp_usedp = true;
+ break;
+
+ case SW_64_R_LITUSE:
+ /* See SW_64_R_LITERAL above for the uses of this reloc. It
+ does not cause anything to happen, itself. */
+ break;
+
+ case SW_64_R_GPDISP:
+ /* This marks the ldah of an ldah/lda pair which loads the
+ gp register with the difference of the gp value and the
+ current location. The second of the pair is r_symndx
+ bytes ahead. It used to be marked with an SW_64_R_IGNORE
+ reloc, but OSF/1 3.2 no longer does that. */
+ {
+ unsigned long insn1, insn2;
+
+ /* Get the two instructions. */
+ insn1
+ = bfd_get_32 (input_bfd, contents + r_vaddr - input_section->vma);
+ insn2 = bfd_get_32 (input_bfd, (contents + r_vaddr
+ - input_section->vma + r_symndx));
+
+ BFD_ASSERT (((insn1 >> 26) & 0x3f) == 0x09); /* ldah */
+ BFD_ASSERT (((insn2 >> 26) & 0x3f) == 0x08); /* lda */
+
+ /* Get the existing addend. We must account for the sign
+ extension done by lda and ldah. */
+ addend = ((insn1 & 0xffff) << 16) + (insn2 & 0xffff);
+ if (insn1 & 0x8000)
+ {
+ /* This is addend -= 0x100000000 without causing an
+ integer overflow on a 32 bit host. */
+ addend -= 0x80000000;
+ addend -= 0x80000000;
+ }
+ if (insn2 & 0x8000)
+ addend -= 0x10000;
+
+ /* The existing addend includes the difference between the
+ gp of the input BFD and the address in the input BFD.
+ We want to change this to the difference between the
+ final GP and the final address. */
+ addend += (gp - ecoff_data (input_bfd)->gp + input_section->vma
+ - (input_section->output_section->vma
+ + input_section->output_offset));
+
+ /* Change the instructions, accounting for the sign
+ extension, and write them out. */
+ if (addend & 0x8000)
+ addend += 0x10000;
+ insn1 = (insn1 & 0xffff0000) | ((addend >> 16) & 0xffff);
+ insn2 = (insn2 & 0xffff0000) | (addend & 0xffff);
+
+ bfd_put_32 (input_bfd, (bfd_vma) insn1,
+ contents + r_vaddr - input_section->vma);
+ bfd_put_32 (input_bfd, (bfd_vma) insn2,
+ contents + r_vaddr - input_section->vma + r_symndx);
+
+ gp_usedp = true;
+ }
+ break;
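+ /* To make the addend arithmetic above concrete (numbers are
+ illustrative only, not taken from any particular object): if the
+ ldah displacement is 0x0001 and the lda displacement is 0x8000,
+ then addend = (0x0001 << 16) + 0x8000 - 0x10000 = 0x8000, which
+ is 65536 + (-32768) once each field is sign extended. On the way
+ back out, the (addend & 0x8000) test adds 0x10000 again so the
+ pair re-encodes as 0x0001 / 0x8000. */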
+
+ case SW_64_R_OP_PUSH:
+ case SW_64_R_OP_PSUB:
+ case SW_64_R_OP_PRSHIFT:
+ /* Manipulate values on the reloc evaluation stack. The
+ r_vaddr field is not an address in input_section, it is
+ the current value (including any addend) of the object
+ being used. */
+ if (!r_extern)
+ {
+ asection *s;
+
+ s = symndx_to_section[r_symndx];
+ if (s == (asection *) NULL)
+ abort ();
+ addend = s->output_section->vma + s->output_offset - s->vma;
+ }
+ else
+ {
+ struct ecoff_link_hash_entry *h;
+
+ h = sym_hashes[r_symndx];
+ if (h == (struct ecoff_link_hash_entry *) NULL)
+ abort ();
+
+ if (!bfd_link_relocatable (info))
+ {
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ addend = (h->root.u.def.value
+ + h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset);
+ else
+ {
+ /* Note that we pass the address as 0, since we
+ do not have a meaningful number for the
+ location within the section that is being
+ relocated. */
+ (*info->callbacks->undefined_symbol) (info,
+ h->root.root.string,
+ input_bfd,
+ input_section,
+ (bfd_vma) 0, true);
+ addend = 0;
+ }
+ }
+ else
+ {
+ if (h->root.type != bfd_link_hash_defined
+ && h->root.type != bfd_link_hash_defweak && h->indx == -1)
+ {
+ /* This symbol is not being written out. Pass
+ the address as 0, as with undefined_symbol,
+ above. */
+ (*info->callbacks->unattached_reloc) (info,
+ h->root.root.string,
+ input_bfd,
+ input_section,
+ (bfd_vma) 0);
+ }
+
+ addend = sw_64_convert_external_reloc (output_bfd, info,
+ input_bfd, ext_rel, h);
+ }
+ }
+
+ addend += r_vaddr;
+
+ if (bfd_link_relocatable (info))
+ {
+ /* Adjust r_vaddr by the addend. */
+ H_PUT_64 (input_bfd, addend, ext_rel->r_vaddr);
+ }
+ else
+ {
+ switch (r_type)
+ {
+ case SW_64_R_OP_PUSH:
+ if (tos >= RELOC_STACKSIZE)
+ abort ();
+ stack[tos++] = addend;
+ break;
+
+ case SW_64_R_OP_PSUB:
+ if (tos == 0)
+ abort ();
+ stack[tos - 1] -= addend;
+ break;
+
+ case SW_64_R_OP_PRSHIFT:
+ if (tos == 0)
+ abort ();
+ stack[tos - 1] >>= addend;
+ break;
+ }
+ }
+
+ adjust_addrp = false;
+ break;
+
+ case SW_64_R_OP_STORE:
+ /* Store a value from the reloc stack into a bitfield. If
+ we are generating relocatable output, all we do is
+ adjust the address of the reloc. */
+ if (!bfd_link_relocatable (info))
+ {
+ bfd_vma mask;
+ bfd_vma val;
+
+ if (tos == 0)
+ abort ();
+
+ /* Get the relocation mask. The separate steps and the
+ casts to bfd_vma are attempts to avoid a bug in the
+ SW_64 OSF 1.3 C compiler. See reloc.c for more
+ details. */
+ mask = 1;
+ mask <<= (bfd_vma) r_size;
+ mask -= 1;
+
+ /* FIXME: I don't know what kind of overflow checking,
+ if any, should be done here. */
+ val = bfd_get_64 (input_bfd,
+ contents + r_vaddr - input_section->vma);
+ val &= ~(mask << (bfd_vma) r_offset);
+ val |= (stack[--tos] & mask) << (bfd_vma) r_offset;
+ bfd_put_64 (input_bfd, val,
+ contents + r_vaddr - input_section->vma);
+ }
+ break;
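+ /* Taken together, OP_PUSH, OP_PSUB, OP_PRSHIFT and OP_STORE form a
+ small stack machine: push a value, optionally subtract another
+ and shift the difference, then store the popped result into the
+ r_size-bit field at bit r_offset of the quadword at r_vaddr. As
+ a purely illustrative example, r_size = 16 and r_offset = 8 give
+ mask = 0xffff, so bits 8..23 of the existing quadword are cleared
+ and replaced by the low 16 bits of the popped value. */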
+
+ case SW_64_R_GPVALUE:
+ /* I really don't know if this does the right thing. */
+ gp = ecoff_data (input_bfd)->gp + r_symndx;
+ gp_undefined = false;
+ break;
+ }
+
+ if (relocatep)
+ {
+ reloc_howto_type *howto;
+ struct ecoff_link_hash_entry *h = NULL;
+ asection *s = NULL;
+ bfd_vma relocation;
+ bfd_reloc_status_type r;
+
+ /* Perform a relocation. */
+
+ howto = &sw_64_howto_table[r_type];
+
+ if (r_extern)
+ {
+ h = sym_hashes[r_symndx];
+ /* If h is NULL, that means that there is a reloc
+ against an external symbol which we thought was just
+ a debugging symbol. This should not happen. */
+ if (h == (struct ecoff_link_hash_entry *) NULL)
+ abort ();
+ }
+ else
+ {
+ if (r_symndx >= NUM_RELOC_SECTIONS)
+ s = NULL;
+ else
+ s = symndx_to_section[r_symndx];
+
+ if (s == (asection *) NULL)
+ abort ();
+ }
+
+ if (bfd_link_relocatable (info))
+ {
+ /* We are generating relocatable output, and must
+ convert the existing reloc. */
+ if (r_extern)
+ {
+ if (h->root.type != bfd_link_hash_defined
+ && h->root.type != bfd_link_hash_defweak && h->indx == -1)
+ {
+ /* This symbol is not being written out. */
+ (*info->callbacks->unattached_reloc) (
+ info, h->root.root.string, input_bfd, input_section,
+ r_vaddr - input_section->vma);
+ }
+
+ relocation
+ = sw_64_convert_external_reloc (output_bfd, info, input_bfd,
+ ext_rel, h);
+ }
+ else
+ {
+ /* This is a relocation against a section. Adjust
+ the value by the amount the section moved. */
+ relocation
+ = (s->output_section->vma + s->output_offset - s->vma);
+ }
+
+ /* If this is PC relative, the existing object file
+ appears to already have the reloc worked out. We
+ must subtract out the old value and add in the new
+ one. */
+ if (howto->pc_relative)
+ relocation
+ -= (input_section->output_section->vma
+ + input_section->output_offset - input_section->vma);
+
+ /* Put in any addend. */
+ relocation += addend;
+
+ /* Adjust the contents. */
+ r = _bfd_relocate_contents (howto, input_bfd, relocation,
+ (contents + r_vaddr
+ - input_section->vma));
+ }
+ else
+ {
+ /* We are producing a final executable. */
+ if (r_extern)
+ {
+ /* This is a reloc against a symbol. */
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ {
+ asection *hsec;
+
+ hsec = h->root.u.def.section;
+ relocation
+ = (h->root.u.def.value + hsec->output_section->vma
+ + hsec->output_offset);
+ }
+ else
+ {
+ (*info->callbacks->undefined_symbol) (
+ info, h->root.root.string, input_bfd, input_section,
+ r_vaddr - input_section->vma, true);
+ relocation = 0;
+ }
+ }
+ else
+ {
+ /* This is a reloc against a section. */
+ relocation
+ = (s->output_section->vma + s->output_offset - s->vma);
+
+ /* Adjust a PC relative relocation by removing the
+ reference to the original source section. */
+ if (howto->pc_relative)
+ relocation += input_section->vma;
+ }
+
+ r = _bfd_final_link_relocate (howto, input_bfd, input_section,
+ contents,
+ r_vaddr - input_section->vma,
+ relocation, addend);
+ }
+
+ if (r != bfd_reloc_ok)
+ {
+ switch (r)
+ {
+ default:
+ case bfd_reloc_outofrange:
+ abort ();
+ case bfd_reloc_overflow: {
+ const char *name;
+
+ if (r_extern)
+ name = sym_hashes[r_symndx]->root.root.string;
+ else
+ name = bfd_section_name (symndx_to_section[r_symndx]);
+ (*info->callbacks->reloc_overflow) (
+ info, NULL, name, sw_64_howto_table[r_type].name,
+ (bfd_vma) 0, input_bfd, input_section,
+ r_vaddr - input_section->vma);
+ }
+ break;
+ }
+ }
+ }
+
+ if (bfd_link_relocatable (info) && adjust_addrp)
+ {
+ /* Change the address of the relocation. */
+ H_PUT_64 (input_bfd,
+ (input_section->output_section->vma
+ + input_section->output_offset - input_section->vma
+ + r_vaddr),
+ ext_rel->r_vaddr);
+ }
+
+ if (gp_usedp && gp_undefined)
+ {
+ (*info->callbacks->reloc_dangerous) (
+ info, _ ("GP relative relocation used when GP not defined"),
+ input_bfd, input_section, r_vaddr - input_section->vma);
+ /* Only give the error once per link. */
+ gp = 4;
+ _bfd_set_gp_value (output_bfd, gp);
+ gp_undefined = false;
+ }
+ }
+
+ if (tos != 0)
+ abort ();
+
+ return true;
+}
+
+/* Do final adjustments to the filehdr and the aouthdr. This routine
+ sets the dynamic bits in the file header. */
+
+static bool
+sw_64_adjust_headers (bfd *abfd, struct internal_filehdr *fhdr,
+ struct internal_aouthdr *ahdr ATTRIBUTE_UNUSED)
+{
+ if ((abfd->flags & (DYNAMIC | EXEC_P)) == (DYNAMIC | EXEC_P))
+ fhdr->f_flags |= F_SW_64_CALL_SHARED;
+ else if ((abfd->flags & DYNAMIC) != 0)
+ fhdr->f_flags |= F_SW_64_SHARABLE;
+ return true;
+}
+
+/* Archive handling. In OSF/1 (or Digital Unix) v3.2, Digital
+ introduced archive packing, in which the elements in an archive are
+ optionally compressed using a simple dictionary scheme. We know
+ how to read such archives, but we don't write them. */
+
+#define sw_64_ecoff_slurp_armap _bfd_ecoff_slurp_armap
+#define sw_64_ecoff_slurp_extended_name_table \
+ _bfd_ecoff_slurp_extended_name_table
+#define sw_64_ecoff_construct_extended_name_table \
+ _bfd_ecoff_construct_extended_name_table
+#define sw_64_ecoff_truncate_arname _bfd_ecoff_truncate_arname
+#define sw_64_ecoff_write_armap _bfd_ecoff_write_armap
+#define sw_64_ecoff_write_ar_hdr _bfd_generic_write_ar_hdr
+#define sw_64_ecoff_generic_stat_arch_elt _bfd_ecoff_generic_stat_arch_elt
+#define sw_64_ecoff_update_armap_timestamp _bfd_ecoff_update_armap_timestamp
+
+/* A compressed file uses this instead of ARFMAG. */
+
+#define ARFZMAG "Z\012"
+
+/* Read an archive header. This is like the standard routine, but it
+ also accepts ARFZMAG. */
+
+static void *
+sw_64_ecoff_read_ar_hdr (bfd *abfd)
+{
+ struct areltdata *ret;
+ struct ar_hdr *h;
+
+ ret = (struct areltdata *) _bfd_generic_read_ar_hdr_mag (abfd, ARFZMAG);
+ if (ret == NULL)
+ return NULL;
+
+ h = (struct ar_hdr *) ret->arch_header;
+ if (strncmp (h->ar_fmag, ARFZMAG, 2) == 0)
+ {
+ bfd_byte ab[8];
+
+ /* This is a compressed file. We must set the size correctly.
+ The size is the eight bytes after the dummy file header. */
+ if (bfd_seek (abfd, (file_ptr) FILHSZ, SEEK_CUR) != 0
+ || bfd_bread (ab, (bfd_size_type) 8, abfd) != 8
+ || bfd_seek (abfd, (file_ptr) (-(FILHSZ + 8)), SEEK_CUR) != 0)
+ {
+ free (ret);
+ return NULL;
+ }
+
+ ret->parsed_size = H_GET_64 (abfd, ab);
+ }
+
+ return ret;
+}
+
+/* Get an archive element at a specified file position. This is where
+ we uncompress the archive element if necessary. */
+
+static bfd *
+sw_64_ecoff_get_elt_at_filepos (bfd *archive, file_ptr filepos,
+ struct bfd_link_info *info)
+{
+ bfd *nbfd = NULL;
+ struct areltdata *tdata;
+ struct ar_hdr *hdr;
+ bfd_byte ab[8];
+ bfd_size_type size;
+ bfd_byte *buf, *p;
+ struct bfd_in_memory *bim;
+ ufile_ptr filesize;
+
+ buf = NULL;
+ nbfd = _bfd_get_elt_at_filepos (archive, filepos, info);
+ if (nbfd == NULL)
+ goto error_return;
+
+ if ((nbfd->flags & BFD_IN_MEMORY) != 0)
+ {
+ /* We have already expanded this BFD. */
+ return nbfd;
+ }
+
+ tdata = (struct areltdata *) nbfd->arelt_data;
+ hdr = (struct ar_hdr *) tdata->arch_header;
+ if (strncmp (hdr->ar_fmag, ARFZMAG, 2) != 0)
+ return nbfd;
+
+ /* We must uncompress this element. We do this by copying it into a
+ memory buffer, and making bfd_bread and bfd_seek use that buffer.
+ This can use a lot of memory, but it's simpler than getting a
+ temporary file, making that work with the file descriptor caching
+ code, and making sure that it is deleted at all appropriate
+ times. It can be changed if it ever becomes important. */
+
+ /* The compressed file starts with a dummy ECOFF file header. */
+ if (bfd_seek (nbfd, (file_ptr) FILHSZ, SEEK_SET) != 0)
+ goto error_return;
+
+ /* The next eight bytes are the real file size. */
+ if (bfd_bread (ab, (bfd_size_type) 8, nbfd) != 8)
+ goto error_return;
+ size = H_GET_64 (nbfd, ab);
+
+ /* The decompression algorithm will at most expand by eight times. */
+ filesize = bfd_get_file_size (archive);
+ if (filesize != 0 && size / 8 > filesize)
+ {
+ bfd_set_error (bfd_error_malformed_archive);
+ goto error_return;
+ }
+
+ if (size != 0)
+ {
+ bfd_size_type left;
+ bfd_byte dict[4096];
+ unsigned int h;
+ bfd_byte b;
+
+ buf = (bfd_byte *) bfd_malloc (size);
+ if (buf == NULL)
+ goto error_return;
+ p = buf;
+
+ left = size;
+
+ /* I don't know what the next eight bytes are for. */
+ if (bfd_bread (ab, (bfd_size_type) 8, nbfd) != 8)
+ goto error_return;
+
+ /* This is the uncompression algorithm. It's a simple
+ dictionary based scheme in which each character is predicted
+ by a hash of the previous three characters. A control byte
+ indicates whether the character is predicted or whether it
+ appears in the input stream; each control byte manages the
+ next eight bytes in the output stream. */
+ memset (dict, 0, sizeof dict);
+ h = 0;
+ while (bfd_bread (&b, (bfd_size_type) 1, nbfd) == 1)
+ {
+ unsigned int i;
+
+ for (i = 0; i < 8; i++, b >>= 1)
+ {
+ bfd_byte n;
+
+ if ((b & 1) == 0)
+ n = dict[h];
+ else
+ {
+ if (bfd_bread (&n, 1, nbfd) != 1)
+ goto error_return;
+ dict[h] = n;
+ }
+
+ *p++ = n;
+
+ --left;
+ if (left == 0)
+ break;
+
+ h <<= 4;
+ h ^= n;
+ h &= sizeof dict - 1;
+ }
+
+ if (left == 0)
+ break;
+ }
+ }
+
+ /* Now the uncompressed file contents are in buf. */
+ bim = ((struct bfd_in_memory *) bfd_malloc (
+ (bfd_size_type) sizeof (struct bfd_in_memory)));
+ if (bim == NULL)
+ goto error_return;
+ bim->size = size;
+ bim->buffer = buf;
+
+ nbfd->mtime_set = true;
+ nbfd->mtime = strtol (hdr->ar_date, (char **) NULL, 10);
+
+ nbfd->flags |= BFD_IN_MEMORY;
+ nbfd->iostream = bim;
+ nbfd->iovec = &_bfd_memory_iovec;
+ nbfd->origin = 0;
+ BFD_ASSERT (!nbfd->cacheable);
+
+ return nbfd;
+
+error_return:
+ free (buf);
+ if (nbfd != NULL)
+ bfd_close (nbfd);
+ return NULL;
+}
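+
+/* The dictionary scheme above is easier to see in isolation from the
+ BFD I/O, so here is a minimal stand-alone sketch of the same
+ predictor working on plain memory buffers. It is illustrative only
+ and deliberately not compiled; the function and parameter names are
+ invented for the sketch. */
+#if 0
+static bfd_size_type
+sw_64_uncompress_sketch (const bfd_byte *in, bfd_size_type in_len,
+ bfd_byte *out, bfd_size_type out_len)
+{
+ bfd_byte dict[4096];
+ unsigned int h = 0;
+ bfd_size_type i = 0, o = 0;
+
+ memset (dict, 0, sizeof dict);
+ while (i < in_len && o < out_len)
+ {
+ /* Each control byte governs the next eight output bytes. */
+ bfd_byte b = in[i++];
+ unsigned int k;
+
+ for (k = 0; k < 8 && o < out_len; k++, b >>= 1)
+ {
+ bfd_byte n;
+
+ if ((b & 1) == 0)
+ /* Predicted byte: reuse what the dictionary remembers
+ for the current hash. */
+ n = dict[h];
+ else
+ {
+ /* Literal byte: consume it and update the dictionary. */
+ if (i >= in_len)
+ return o;
+ n = in[i++];
+ dict[h] = n;
+ }
+
+ out[o++] = n;
+ h = ((h << 4) ^ n) & (sizeof dict - 1);
+ }
+ }
+ return o;
+}
+#endif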
+
+/* Open the next archived file. */
+
+static bfd *
+sw_64_ecoff_openr_next_archived_file (bfd *archive, bfd *last_file)
+{
+ ufile_ptr filestart;
+
+ if (last_file == NULL)
+ filestart = bfd_ardata (archive)->first_file_filepos;
+ else
+ {
+ struct areltdata *t;
+ struct ar_hdr *h;
+ bfd_size_type size;
+
+ /* We can't use arelt_size here, because that uses parsed_size,
+ which is the uncompressed size. We need the compressed size. */
+ t = (struct areltdata *) last_file->arelt_data;
+ h = (struct ar_hdr *) t->arch_header;
+ size = strtol (h->ar_size, (char **) NULL, 10);
+
+ /* Pad to an even boundary...
+ Note that last_file->origin can be odd in the case of
+ BSD-4.4-style element with a long odd size. */
+ filestart = last_file->proxy_origin + size;
+ filestart += filestart % 2;
+ if (filestart < last_file->proxy_origin)
+ {
+ /* Prevent looping. See PR19256. */
+ bfd_set_error (bfd_error_malformed_archive);
+ return NULL;
+ }
+ }
+
+ return sw_64_ecoff_get_elt_at_filepos (archive, filestart, NULL);
+}
+
+/* Open the archive file given an index into the armap. */
+
+static bfd *
+sw_64_ecoff_get_elt_at_index (bfd *abfd, symindex sym_index)
+{
+ carsym *entry;
+
+ entry = bfd_ardata (abfd)->symdefs + sym_index;
+ return sw_64_ecoff_get_elt_at_filepos (abfd, entry->file_offset, NULL);
+}
+
+static void
+sw_64_ecoff_swap_coff_aux_in (bfd *abfd ATTRIBUTE_UNUSED,
+ void *ext1 ATTRIBUTE_UNUSED,
+ int type ATTRIBUTE_UNUSED,
+ int in_class ATTRIBUTE_UNUSED,
+ int indx ATTRIBUTE_UNUSED,
+ int numaux ATTRIBUTE_UNUSED,
+ void *in1 ATTRIBUTE_UNUSED)
+{}
+
+static void
+sw_64_ecoff_swap_coff_sym_in (bfd *abfd ATTRIBUTE_UNUSED,
+ void *ext1 ATTRIBUTE_UNUSED,
+ void *in1 ATTRIBUTE_UNUSED)
+{}
+
+static void
+sw_64_ecoff_swap_coff_lineno_in (bfd *abfd ATTRIBUTE_UNUSED,
+ void *ext1 ATTRIBUTE_UNUSED,
+ void *in1 ATTRIBUTE_UNUSED)
+{}
+
+static unsigned int
+sw_64_ecoff_swap_coff_aux_out (bfd *abfd ATTRIBUTE_UNUSED,
+ void *inp ATTRIBUTE_UNUSED,
+ int type ATTRIBUTE_UNUSED,
+ int in_class ATTRIBUTE_UNUSED,
+ int indx ATTRIBUTE_UNUSED,
+ int numaux ATTRIBUTE_UNUSED,
+ void *extp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned int
+sw_64_ecoff_swap_coff_sym_out (bfd *abfd ATTRIBUTE_UNUSED,
+ void *inp ATTRIBUTE_UNUSED,
+ void *extp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned int
+sw_64_ecoff_swap_coff_lineno_out (bfd *abfd ATTRIBUTE_UNUSED,
+ void *inp ATTRIBUTE_UNUSED,
+ void *extp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned int
+sw_64_ecoff_swap_coff_reloc_out (bfd *abfd ATTRIBUTE_UNUSED,
+ void *inp ATTRIBUTE_UNUSED,
+ void *extp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+/* This is the ECOFF backend structure. The backend field of the
+ target vector points to this. */
+
+static const struct ecoff_backend_data sw_64_ecoff_backend_data = {
+ /* COFF backend structure. */
+ {sw_64_ecoff_swap_coff_aux_in,
+ sw_64_ecoff_swap_coff_sym_in,
+ sw_64_ecoff_swap_coff_lineno_in,
+ sw_64_ecoff_swap_coff_aux_out,
+ sw_64_ecoff_swap_coff_sym_out,
+ sw_64_ecoff_swap_coff_lineno_out,
+ sw_64_ecoff_swap_coff_reloc_out,
+ sw_64_ecoff_swap_filehdr_out,
+ sw_64_ecoff_swap_aouthdr_out,
+ sw_64_ecoff_swap_scnhdr_out,
+ FILHSZ,
+ AOUTSZ,
+ SCNHSZ,
+ 0,
+ 0,
+ 0,
+ 0,
+ FILNMLEN,
+ true,
+ ECOFF_NO_LONG_SECTION_NAMES,
+ 4,
+ false,
+ 2,
+ 32768,
+ sw_64_ecoff_swap_filehdr_in,
+ sw_64_ecoff_swap_aouthdr_in,
+ sw_64_ecoff_swap_scnhdr_in,
+ NULL,
+ sw_64_ecoff_bad_format_hook,
+ _bfd_ecoff_set_arch_mach_hook,
+ sw_64_ecoff_mkobject_hook,
+ _bfd_ecoff_styp_to_sec_flags,
+ _bfd_ecoff_set_alignment_hook,
+ _bfd_ecoff_slurp_symbol_table,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL},
+ /* Supported architecture. */
+ bfd_arch_sw_64,
+ /* Initial portion of armap string. */
+ "________64",
+ /* The page boundary used to align sections in a demand-paged
+ executable file. E.g., 0x1000. */
+ 0x2000,
+ /* true if the .rdata section is part of the text segment, as on the
+ SW_64. false if .rdata is part of the data segment, as on the
+ MIPS. */
+ true,
+ /* Bitsize of constructor entries. */
+ 64,
+ /* Reloc to use for constructor entries. */
+ &sw_64_howto_table[SW_64_R_REFQUAD],
+ {/* Symbol table magic number. */
+ magicSym2,
+ /* Alignment of debugging information. E.g., 4. */
+ 8,
+ /* Sizes of external symbolic information. */
+ sizeof (struct hdr_ext), sizeof (struct dnr_ext), sizeof (struct pdr_ext),
+ sizeof (struct sym_ext), sizeof (struct opt_ext), sizeof (struct fdr_ext),
+ sizeof (struct rfd_ext), sizeof (struct ext_ext),
+ /* Functions to swap in external symbolic data. */
+ ecoff_swap_hdr_in, ecoff_swap_dnr_in, ecoff_swap_pdr_in, ecoff_swap_sym_in,
+ ecoff_swap_opt_in, ecoff_swap_fdr_in, ecoff_swap_rfd_in, ecoff_swap_ext_in,
+ _bfd_ecoff_swap_tir_in, _bfd_ecoff_swap_rndx_in,
+ /* Functions to swap out external symbolic data. */
+ ecoff_swap_hdr_out, ecoff_swap_dnr_out, ecoff_swap_pdr_out,
+ ecoff_swap_sym_out, ecoff_swap_opt_out, ecoff_swap_fdr_out,
+ ecoff_swap_rfd_out, ecoff_swap_ext_out, _bfd_ecoff_swap_tir_out,
+ _bfd_ecoff_swap_rndx_out,
+ /* Function to read in symbolic data. */
+ _bfd_ecoff_slurp_symbolic_info},
+ /* External reloc size. */
+ RELSZ,
+ /* Reloc swapping functions. */
+ sw_64_ecoff_swap_reloc_in,
+ sw_64_ecoff_swap_reloc_out,
+ /* Backend reloc tweaking. */
+ sw_64_adjust_reloc_in,
+ sw_64_adjust_reloc_out,
+ /* Relocate section contents while linking. */
+ sw_64_relocate_section,
+ /* Do final adjustments to filehdr and aouthdr. */
+ sw_64_adjust_headers,
+ /* Read an element from an archive at a given file position. */
+ sw_64_ecoff_get_elt_at_filepos};
+
+/* Looking up a reloc type is SW_64 specific. */
+#define _bfd_ecoff_bfd_reloc_type_lookup sw_64_bfd_reloc_type_lookup
+#define _bfd_ecoff_bfd_reloc_name_lookup sw_64_bfd_reloc_name_lookup
+
+/* So is getting relocated section contents. */
+#define _bfd_ecoff_bfd_get_relocated_section_contents \
+ sw_64_ecoff_get_relocated_section_contents
+
+/* Handling file windows is generic. */
+#define _bfd_ecoff_get_section_contents_in_window \
+ _bfd_generic_get_section_contents_in_window
+
+/* Input section flag lookup is generic. */
+#define _bfd_ecoff_bfd_lookup_section_flags bfd_generic_lookup_section_flags
+
+/* Relaxing sections is generic. */
+#define _bfd_ecoff_bfd_relax_section bfd_generic_relax_section
+#define _bfd_ecoff_bfd_gc_sections bfd_generic_gc_sections
+#define _bfd_ecoff_bfd_merge_sections bfd_generic_merge_sections
+#define _bfd_ecoff_bfd_is_group_section bfd_generic_is_group_section
+#define _bfd_ecoff_bfd_group_name bfd_generic_group_name
+#define _bfd_ecoff_bfd_discard_group bfd_generic_discard_group
+#define _bfd_ecoff_section_already_linked _bfd_coff_section_already_linked
+#define _bfd_ecoff_bfd_define_common_symbol bfd_generic_define_common_symbol
+#define _bfd_ecoff_bfd_link_hide_symbol _bfd_generic_link_hide_symbol
+#define _bfd_ecoff_bfd_define_start_stop bfd_generic_define_start_stop
+#define _bfd_ecoff_bfd_link_check_relocs _bfd_generic_link_check_relocs
+
+/* Installing internal relocations in a section is also generic. */
+#define _bfd_ecoff_set_reloc _bfd_generic_set_reloc
+
+const bfd_target sw_64_ecoff_le_vec
+ = {"ecoff-littlesw_64", /* name */
+ bfd_target_ecoff_flavour,
+ BFD_ENDIAN_LITTLE, /* data byte order is little */
+ BFD_ENDIAN_LITTLE, /* header byte order is little */
+
+ (HAS_RELOC | EXEC_P /* object flags */
+ | HAS_LINENO | HAS_DEBUG | HAS_SYMS | HAS_LOCALS | DYNAMIC | WP_TEXT
+ | D_PAGED),
+
+ (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC | SEC_CODE | SEC_DATA
+ | SEC_SMALL_DATA),
+ 0, /* leading underscore */
+ ' ', /* ar_pad_char */
+ 15, /* ar_max_namelen */
+ 0, /* match priority. */
+ TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols. */
+ bfd_getl64,
+ bfd_getl_signed_64,
+ bfd_putl64,
+ bfd_getl32,
+ bfd_getl_signed_32,
+ bfd_putl32,
+ bfd_getl16,
+ bfd_getl_signed_16,
+ bfd_putl16, /* data */
+ bfd_getl64,
+ bfd_getl_signed_64,
+ bfd_putl64,
+ bfd_getl32,
+ bfd_getl_signed_32,
+ bfd_putl32,
+ bfd_getl16,
+ bfd_getl_signed_16,
+ bfd_putl16, /* hdrs */
+
+ {/* bfd_check_format */
+ _bfd_dummy_target, sw_64_ecoff_object_p, bfd_generic_archive_p,
+ _bfd_dummy_target},
+ {/* bfd_set_format */
+ _bfd_bool_bfd_false_error, _bfd_ecoff_mkobject, _bfd_generic_mkarchive,
+ _bfd_bool_bfd_false_error},
+ {/* bfd_write_contents */
+ _bfd_bool_bfd_false_error, _bfd_ecoff_write_object_contents,
+ _bfd_write_archive_contents, _bfd_bool_bfd_false_error},
+
+ BFD_JUMP_TABLE_GENERIC (_bfd_ecoff),
+ BFD_JUMP_TABLE_COPY (_bfd_ecoff),
+ BFD_JUMP_TABLE_CORE (_bfd_nocore),
+ BFD_JUMP_TABLE_ARCHIVE (sw_64_ecoff),
+ BFD_JUMP_TABLE_SYMBOLS (_bfd_ecoff),
+ BFD_JUMP_TABLE_RELOCS (_bfd_ecoff),
+ BFD_JUMP_TABLE_WRITE (_bfd_ecoff),
+ BFD_JUMP_TABLE_LINK (_bfd_ecoff),
+ BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
+
+ NULL,
+
+ &sw_64_ecoff_backend_data};
diff --git a/bfd/config.bfd b/bfd/config.bfd
index bdee5395..85434116 100644
--- a/bfd/config.bfd
+++ b/bfd/config.bfd
@@ -179,6 +179,7 @@ targ_cpu=`echo $targ | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
case "${targ_cpu}" in
aarch64*) targ_archs="bfd_aarch64_arch bfd_arm_arch";;
alpha*) targ_archs=bfd_alpha_arch ;;
+sw_64*) targ_archs=bfd_sw_64_arch ;;
am33_2.0*) targ_archs=bfd_mn10300_arch ;;
arc*) targ_archs=bfd_arc_arch ;;
arm*) targ_archs=bfd_arm_arch ;;
@@ -327,6 +328,55 @@ case "${targ}" in
targ_defvec=alpha_ecoff_le_vec
want64=true
;;
+ sw_64*-*-freebsd* | sw_64*-*-kfreebsd*-gnu)
+ targ_defvec=sw_64_elf64_fbsd_vec
+ targ_selvecs="sw_64_elf64_vec sw_64_ecoff_le_vec"
+ want64=true
+ # FreeBSD <= 4.0 supports only the old nonstandard way of ABI labelling.
+ case "${targ}" in
+ sw_64*-*-freebsd3* | sw_64*-*-freebsd4 | sw_64*-*-freebsd4.0*)
+ targ_cflags=-DOLD_FREEBSD_ABI_LABEL ;;
+ esac
+ ;;
+ sw_64*-*-netbsd* | sw_64*-*-openbsd*)
+ targ_defvec=sw_64_elf64_vec
+ targ_selvecs=sw_64_ecoff_le_vec
+ want64=true
+ ;;
+ sw_64*-*-netware*)
+ targ_defvec=sw_64_ecoff_le_vec
+ targ_selvecs=sw_64_nlm32_vec
+ want64=true
+ ;;
+ sw_64*-*-linux*ecoff*)
+ targ_defvec=sw_64_ecoff_le_vec
+ targ_selvecs=sw_64_elf64_vec
+ want64=true
+ ;;
+ sw_64-*-linux-* | sw_64-*-elf*)
+ targ_defvec=sw_64_elf64_vec
+ targ_selvecs=sw_64_ecoff_le_vec
+ want64=true
+ ;;
+ sw_64sw6a-*-linux-* | sw_64sw6a-*-elf*)
+ targ_defvec=sw_64_elf64_vec
+ targ_selvecs=sw_64_ecoff_le_vec
+ want64=true
+ ;;
+ sw_64sw6b-*-linux-* | sw_64sw6b-*-elf*)
+ targ_defvec=sw_64_elf64_vec
+ targ_selvecs=sw_64_ecoff_le_vec
+ want64=true
+ ;;
+ sw_64sw8a-*-linux-* | sw_64sw8a-*-elf*)
+ targ_defvec=sw_64_elf64_vec
+ targ_selvecs=sw_64_ecoff_le_vec
+ want64=true
+ ;;
+ sw_64*-*-*)
+ targ_defvec=sw_64_ecoff_le_vec
+ want64=true
+ ;;
amdgcn-*-*)
targ_defvec=amdgcn_elf64_le_vec
want64=true
diff --git a/bfd/config.in b/bfd/config.in
index 7ff3eeeb..6b0ac52c 100644
--- a/bfd/config.in
+++ b/bfd/config.in
@@ -363,3 +363,5 @@
/* Use structured /proc on Solaris. */
#undef _STRUCTURED_PROC
+
+#undef TARGET_SW_64
diff --git a/bfd/configure b/bfd/configure
index 910af3ce..493bac83 100755
--- a/bfd/configure
+++ b/bfd/configure
@@ -12003,6 +12003,11 @@ i[3-7]86-*-linux-* | x86_64-*-linux-*)
ac_default_ld_z_separate_code=1
fi
;;
+sw_64-*-linux-*)
+cat >>confdefs.h <<_ACEOF
+#define TARGET_SW_64
+_ACEOF
+ ;;
esac
if test "${ac_default_ld_z_separate_code}" = unset; then
ac_default_ld_z_separate_code=0
@@ -13847,6 +13852,10 @@ do
alpha_elf64_fbsd_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;;
alpha_vms_vec) tb="$tb vms-alpha.lo vms-misc.lo vms-lib.lo"; target_size=64 ;;
alpha_vms_lib_txt_vec) tb="$tb vms-lib.lo vms-misc.lo" ;;
+ sw_64_ecoff_le_vec) tb="$tb coff-sw_64.lo ecoff.lo $ecoff"; target_size=64 ;;
+ sw_64_elf64_vec) tb="$tb elf64-sw_64.lo elf64.lo $elf"; target_size=64 ;;
+ sw_64_elf64_fbsd_vec) tb="$tb elf64-sw_64.lo elf64.lo $elf"; target_size=64 ;;
+ sw_64_nlm32_vec) tb="$tb nlm32-sw_64.lo nlm32.lo nlm.lo"; target_size=64 ;;
am33_elf32_linux_vec) tb="$tb elf32-am33lin.lo elf32.lo $elf" ;;
amdgcn_elf64_le_vec) tb="$tb elf64-amdgcn.lo elf64.lo $elf"; target_size=64 ;;
aout0_be_vec) tb="$tb aout0.lo aout32.lo" ;;
@@ -14253,7 +14262,20 @@ if test "${target}" = "${host}"; then
COREFILE=netbsd-core.lo
;;
alpha*-*-*)
- COREFILE=osf-core.lo
+ COREFILE=osf-core.lo
+ ;;
+ sw_64*-*-freebsd* | sw_64*-*-kfreebsd*-gnu | sw_64*-*-*vms*)
+ COREFILE=''
+ ;;
+ sw_64*-*-linux-*)
+ COREFILE=trad-core.lo
+ TRAD_HEADER='"hosts/sw_64linux.h"'
+ ;;
+ sw_64*-*-netbsd* | sw_64*-*-openbsd*)
+ COREFILE=netbsd-core.lo
+ ;;
+ sw_64*-*-*)
+ COREFILE=osf-core.lo
;;
arm-*-freebsd* | arm-*-kfreebsd*-gnu)
COREFILE='' ;;
diff --git a/bfd/configure.ac b/bfd/configure.ac
index f044616f..160b1351 100644
--- a/bfd/configure.ac
+++ b/bfd/configure.ac
@@ -414,6 +414,10 @@ do
alpha_elf64_fbsd_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;;
alpha_vms_vec) tb="$tb vms-alpha.lo vms-misc.lo vms-lib.lo"; target_size=64 ;;
alpha_vms_lib_txt_vec) tb="$tb vms-lib.lo vms-misc.lo" ;;
+ sw_64_ecoff_le_vec) tb="$tb coff-sw_64.lo ecoff.lo $ecoff"; target_size=64 ;;
+ sw_64_elf64_vec) tb="$tb elf64-sw_64.lo elf64.lo $elf"; target_size=64 ;;
+ sw_64_elf64_fbsd_vec) tb="$tb elf64-sw_64.lo elf64.lo $elf"; target_size=64 ;;
+ sw_64_nlm32_vec) tb="$tb nlm32-sw_64.lo nlm32.lo nlm.lo"; target_size=64 ;;
am33_elf32_linux_vec) tb="$tb elf32-am33lin.lo elf32.lo $elf" ;;
amdgcn_elf64_le_vec) tb="$tb elf64-amdgcn.lo elf64.lo $elf"; target_size=64 ;;
aout0_be_vec) tb="$tb aout0.lo aout32.lo" ;;
diff --git a/bfd/configure.com b/bfd/configure.com
index 4fb6855e..06c42239 100644
--- a/bfd/configure.com
+++ b/bfd/configure.com
@@ -26,9 +26,10 @@ $!
$ arch=F$GETSYI("ARCH_NAME")
$ arch=F$EDIT(arch,"LOWERCASE")
$if arch .eqs. "alpha" then target = "alpha"
+$if arch .eqs. "sw_64" then target = "sw_64"
$if arch .eqs. "ia64" then target = "ia64"
$!
-$if (arch .eqs. "alpha") .or. (arch .eqs. "ia64")
+$if (arch .eqs. "alpha") .or. (arch .eqs. "ia64") .or. (arch .eqs. "sw_64")
$then
$!
$ write sys$output "Configuring BFD for ''target' target"
diff --git a/bfd/cpu-sw_64.c b/bfd/cpu-sw_64.c
new file mode 100644
index 00000000..5dc347c2
--- /dev/null
+++ b/bfd/cpu-sw_64.c
@@ -0,0 +1,54 @@
+/* BFD support for the Sw_64 architecture.
+ Copyright (C) 1992-2023 Free Software Foundation, Inc.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#include "sysdep.h"
+#include "bfd.h"
+#include "libbfd.h"
+
+#define N(BITS_WORD, BITS_ADDR, NUMBER, PRINT, DEFAULT, NEXT) \
+ { \
+ BITS_WORD, /* Bits in a word. */ \
+ BITS_ADDR, /* Bits in an address. */ \
+ 8, /* Bits in a byte. */ \
+ bfd_arch_sw_64, NUMBER, "sw_64", PRINT, \
+ 3, /* Section alignment power. */ \
+ DEFAULT, bfd_default_compatible, bfd_default_scan, \
+ bfd_arch_default_fill, NEXT, \
+ 0 /* Maximum offset of a reloc from the start of an insn. */ \
+ }
+
+enum
+{
+ I_sw6a,
+ I_sw6b,
+ I_sw8a
+};
+
+#define NN(index) (&arch_info_struct[(index) + 1])
+static const bfd_arch_info_type arch_info_struct[] = {
+ N (64, 64, bfd_mach_sw_64_sw6a, "sw_64:4", false, NN (I_sw6a)),
+ N (64, 64, bfd_mach_sw_64_sw6a, "sw_64:4", false, NN (I_sw6b)),
+ N (64, 64, bfd_mach_sw_64_sw6b, "sw_64:8", false, NN (I_sw8a)),
+ /* The next field of the last entry must be NULL. */
+ N (64, 64, bfd_mach_sw_64_sw8a, "sw_64:12", false, NULL),
+};
+
+const bfd_arch_info_type bfd_sw_64_arch
+ = N (64, 64, 0, "sw_64", false, &arch_info_struct[0]);
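+
+/* Illustrative only: once bfd_sw_64_arch is entered in the generic
+ architecture list in archures.c, a client of libbfd can resolve the
+ names above through the normal lookup routine, e.g.
+
+ const bfd_arch_info_type *a = bfd_scan_arch ("sw_64:8");
+
+ which walks the "next" chain and, via bfd_default_scan, should come
+ back with the sw6b element of arch_info_struct. */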
diff --git a/bfd/ecoff.c b/bfd/ecoff.c
index 522a4425..bbd979b0 100644
--- a/bfd/ecoff.c
+++ b/bfd/ecoff.c
@@ -222,6 +222,13 @@ _bfd_ecoff_set_arch_mach_hook (bfd *abfd, void * filehdr)
mach = 0;
break;
+#ifdef TARGET_SW_64
+ case SW_64_MAGIC:
+ arch = bfd_arch_sw_64;
+ mach = 0;
+ break;
+#endif
+
default:
arch = bfd_arch_obscure;
mach = 0;
@@ -275,6 +282,11 @@ ecoff_get_magic (bfd *abfd)
case bfd_arch_alpha:
return ALPHA_MAGIC;
+#ifdef TARGET_SW_64
+ case bfd_arch_sw_64:
+ return SW_64_MAGIC;
+#endif
+
default:
abort ();
return 0;
diff --git a/bfd/elf-bfd.h b/bfd/elf-bfd.h
index ec856764..7e9fb196 100644
--- a/bfd/elf-bfd.h
+++ b/bfd/elf-bfd.h
@@ -533,6 +533,9 @@ enum elf_target_id
{
AARCH64_ELF_DATA = 1,
ALPHA_ELF_DATA,
+#ifdef TARGET_SW_64
+ SW_64_ELF_DATA,
+#endif
AMDGCN_ELF_DATA,
ARC_ELF_DATA,
ARM_ELF_DATA,
diff --git a/bfd/elf.c b/bfd/elf.c
index d38e0aff..ef1ba0b6 100644
--- a/bfd/elf.c
+++ b/bfd/elf.c
@@ -2116,6 +2116,9 @@ _bfd_elf_get_dynamic_symbols (bfd *abfd, Elf_Internal_Phdr *phdr,
switch (bed->elf_machine_code)
{
case EM_ALPHA:
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+#endif
case EM_S390:
case EM_S390_OLD:
if (bed->s->elfclass == ELFCLASS64)
@@ -11794,6 +11797,9 @@ elfcore_grok_netbsd_note (bfd *abfd, Elf_Internal_Note *note)
case bfd_arch_aarch64:
case bfd_arch_alpha:
+#ifdef TARGET_SW_64
+ case bfd_arch_sw_64:
+#endif
case bfd_arch_sparc:
switch (note->type)
{
diff --git a/bfd/elf64-sw_64.c b/bfd/elf64-sw_64.c
new file mode 100644
index 00000000..1d24bfc3
--- /dev/null
+++ b/bfd/elf64-sw_64.c
@@ -0,0 +1,5635 @@
+/* Sw_64 specific support for 64-bit ELF
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@tamu.edu>.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* We need a published ABI spec for this. Until one comes out, don't
+ assume this'll remain unchanged forever. */
+
+#include "sysdep.h"
+#include "bfd.h"
+#include "libbfd.h"
+#include "elf-bfd.h"
+#include "ecoff-bfd.h"
+
+#include "elf/sw_64.h"
+
+#define SW_64ECOFF
+
+#define NO_COFF_RELOCS
+#define NO_COFF_SYMBOLS
+#define NO_COFF_LINENOS
+
+/* Get the ECOFF swapping routines. Needed for the debug information. */
+#include "coff/internal.h"
+#include "coff/sym.h"
+#include "coff/symconst.h"
+#include "coff/ecoff.h"
+#include "coff/sw_64.h"
+#include "aout/ar.h"
+#include "libcoff.h"
+#include "libecoff.h"
+#define ECOFF_64
+#include "ecoffswap.h"
+
+/* Instruction data for plt generation and relaxation. */
+
+#define OP_LDI 0x3eU
+#define OP_LDIH 0x3fU
+#define OP_LDL 0x23U
+#define OP_BR 0x04U
+#define OP_BSR 0x05U
+
+#define INSN_LDI (OP_LDI << 26)
+#define INSN_LDIH (OP_LDIH << 26)
+#define INSN_LDL (OP_LDL << 26)
+#define INSN_BR (OP_BR << 26)
+
+#define OP_LDW 0x22
+#define INSN_LDW (OP_LDW << 26)
+
+#define INSN_ADDL 0x40000100
+#define INSN_RDUNIQ 0x0000009e
+#define INSN_SUBL 0x40000120
+#define INSN_S4SUBL 0x40000160
+#define INSN_UNOP 0x43ff075f
+
+#define INSN_ADDPI (((0x10) & 0x3F) << 26) | (((0x1e) & 0xFF) << 5) | 26
+#define INSN_JSR 0x04000000
+#define INSN_JMP 0x0c000000
+#define INSN_JSR_MASK 0xfc000000
+#define INSN_LDL_MASK 0xfc000000
+
+#define INSN_A(I, A) (I | ((unsigned) A << 21))
+#define INSN_AB(I, A, B) (INSN_A (I, A) | (B << 16))
+#define INSN_ABC(I, A, B, C) (INSN_A (I, A) | (B << 16) | C)
+#define INSN_ABO(I, A, B, O) (INSN_A (I, A) | (B << 16) | ((O) &0xffff))
+#define INSN_AD(I, A, D) (INSN_A (I, A) | (((D) >> 2) & 0x1fffff))
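+
+/* As an illustration of the field packing above (register numbers are
+ arbitrary, chosen only to show the layout, not taken from the PLT
+ code): INSN_ABO (INSN_LDIH, 29, 27, 0) == 0xffbb0000, i.e. opcode
+ 0x3f in bits 31..26, 29 in bits 25..21, 27 in bits 20..16 and a
+ zero 16-bit displacement. */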
+
+/* PLT/GOT Stuff */
+
+/* Set by ld emulation. Putting this into the link_info or hash structure
+ is simply working too hard. */
+#ifdef USE_SECUREPLT
+bool elf64_sw_64_use_secureplt = true;
+#else
+bool elf64_sw_64_use_secureplt = false;
+#endif
+
+#define OLD_PLT_HEADER_SIZE 32
+#define OLD_PLT_ENTRY_SIZE 12
+#define NEW_PLT_HEADER_SIZE 36
+#define NEW_PLT_ENTRY_SIZE 4
+
+#define PLT_HEADER_SIZE \
+ (elf64_sw_64_use_secureplt ? NEW_PLT_HEADER_SIZE : OLD_PLT_HEADER_SIZE)
+#define PLT_ENTRY_SIZE \
+ (elf64_sw_64_use_secureplt ? NEW_PLT_ENTRY_SIZE : OLD_PLT_ENTRY_SIZE)
+
+#define MAX_GOT_SIZE_NEW (1024 * 1024 * 2048 - 1)
+
+#define MAX_GOT_SIZE (64 * 1024)
+
+#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so"
+
+/* Used to implement multiple .got subsections. */
+struct sw_64_elf_got_entry
+{
+ struct sw_64_elf_got_entry *next;
+
+ /* Which .got subsection? */
+ bfd *gotobj;
+
+ /* The addend in effect for this entry. */
+ bfd_vma addend;
+
+ /* The .got offset for this entry. */
+ int got_offset;
+
+ /* The .plt offset for this entry. */
+ int plt_offset;
+
+ /* How many references to this entry? */
+ int use_count;
+
+ /* The relocation type of this entry. */
+ unsigned char reloc_type;
+
+ /* How a LITERAL is used. */
+ unsigned char flags;
+
+ /* Have we initialized the dynamic relocation for this entry? */
+ unsigned char reloc_done;
+
+ /* Have we adjusted this entry for SEC_MERGE? */
+ unsigned char reloc_xlated;
+};
+
+struct sw_64_elf_reloc_entry
+{
+ struct sw_64_elf_reloc_entry *next;
+
+ /* Which .reloc section? */
+ asection *srel;
+
+ /* Which section this relocation is against? */
+ asection *sec;
+
+ /* How many did we find? */
+ unsigned long count;
+
+ /* What kind of relocation? */
+ unsigned int rtype;
+};
+
+struct sw_64_elf_link_hash_entry
+{
+ struct elf_link_hash_entry root;
+
+ /* External symbol information. */
+ EXTR esym;
+
+ /* Cumulative flags for all the .got entries. */
+ int flags;
+
+ /* Contexts in which a literal was referenced. */
+#define SW_64_ELF_LINK_HASH_LU_ADDR 0x01
+#define SW_64_ELF_LINK_HASH_LU_MEM 0x02
+#define SW_64_ELF_LINK_HASH_LU_BYTE 0x04
+#define SW_64_ELF_LINK_HASH_LU_JSR 0x08
+#define SW_64_ELF_LINK_HASH_LU_TLSGD 0x10
+#define SW_64_ELF_LINK_HASH_LU_TLSLDM 0x20
+#define SW_64_ELF_LINK_HASH_LU_JSRDIRECT 0x40
+#define SW_64_ELF_LINK_HASH_LU_PLT 0x38
+#define SW_64_ELF_LINK_HASH_TLS_IE 0x80
+
+ /* Used to implement multiple .got subsections. */
+ struct sw_64_elf_got_entry *got_entries;
+
+ /* Used to count non-got, non-plt relocations for delayed sizing
+ of relocation sections. */
+ struct sw_64_elf_reloc_entry *reloc_entries;
+};
+
+/* Sw_64 ELF linker hash table. */
+
+struct sw_64_elf_link_hash_table
+{
+ struct elf_link_hash_table root;
+
+ /* The head of a list of .got subsections linked through
+ sw_64_elf_tdata (abfd)->got_link_next. */
+ bfd *got_list;
+
+ /* The most recent relax pass that we've seen. The GOTs
+ should be regenerated if this doesn't match. */
+ int relax_trip;
+};
+
+/* Look up an entry in a Sw_64 ELF linker hash table. */
+
+#define sw_64_elf_link_hash_lookup(table, string, create, copy, follow) \
+ ((struct sw_64_elf_link_hash_entry *) elf_link_hash_lookup ( \
+ &(table)->root, (string), (create), (copy), (follow)))
+
+/* Traverse a Sw_64 ELF linker hash table. */
+
+#define sw_64_elf_link_hash_traverse(table, func, info) \
+ (elf_link_hash_traverse ( \
+ &(table)->root, (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
+ (info)))
+
+/* Get the Sw_64 ELF linker hash table from a link_info structure. */
+
+#define sw_64_elf_hash_table(p) \
+ ((is_elf_hash_table ((p)->hash) \
+ && elf_hash_table_id (elf_hash_table (p)) == SW_64_ELF_DATA) \
+ ? (struct sw_64_elf_link_hash_table *) (p)->hash \
+ : NULL)
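+
+/* A typical, purely illustrative use in a backend routine:
+
+ struct sw_64_elf_link_hash_table *htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return false;
+
+ The NULL check matters because the macro yields NULL whenever the
+ hash table attached to INFO is not a Sw_64 ELF one. */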
+
+/* Get the object's symbols as our own entry type. */
+
+#define sw_64_elf_sym_hashes(abfd) \
+ ((struct sw_64_elf_link_hash_entry **) elf_sym_hashes (abfd))
+
+/* Should we do dynamic things to this symbol? This differs from the
+ generic version in that we never need to consider function pointer
+ equality wrt PLT entries -- we don't create a PLT entry if a symbol's
+ address is ever taken. */
+
+static inline bool
+sw_64_elf_dynamic_symbol_p (struct elf_link_hash_entry *h,
+ struct bfd_link_info *info)
+{
+ return _bfd_elf_dynamic_symbol_p (h, info, 0);
+}
+
+/* Create an entry in a Sw_64 ELF linker hash table. */
+
+static struct bfd_hash_entry *
+elf64_sw_64_link_hash_newfunc (struct bfd_hash_entry *entry,
+ struct bfd_hash_table *table, const char *string)
+{
+ struct sw_64_elf_link_hash_entry *ret
+ = (struct sw_64_elf_link_hash_entry *) entry;
+
+ /* Allocate the structure if it has not already been allocated by a
+ subclass. */
+ if (ret == (struct sw_64_elf_link_hash_entry *) NULL)
+ ret = ((struct sw_64_elf_link_hash_entry *) bfd_hash_allocate (
+ table, sizeof (struct sw_64_elf_link_hash_entry)));
+ if (ret == (struct sw_64_elf_link_hash_entry *) NULL)
+ return (struct bfd_hash_entry *) ret;
+
+ /* Call the allocation method of the superclass. */
+ ret = ((struct sw_64_elf_link_hash_entry *) _bfd_elf_link_hash_newfunc (
+ (struct bfd_hash_entry *) ret, table, string));
+ if (ret != (struct sw_64_elf_link_hash_entry *) NULL)
+ {
+ /* Set local fields. */
+ memset (&ret->esym, 0, sizeof (EXTR));
+ /* We use -2 as a marker to indicate that the information has
+ not been set. -1 means there is no associated ifd. */
+ ret->esym.ifd = -2;
+ ret->flags = 0;
+ ret->got_entries = NULL;
+ ret->reloc_entries = NULL;
+ }
+
+ return (struct bfd_hash_entry *) ret;
+}
+
+/* Create a Sw_64 ELF linker hash table. */
+
+static struct bfd_link_hash_table *
+elf64_sw_64_bfd_link_hash_table_create (bfd *abfd)
+{
+ struct sw_64_elf_link_hash_table *ret;
+ size_t amt = sizeof (struct sw_64_elf_link_hash_table);
+
+ ret = (struct sw_64_elf_link_hash_table *) bfd_zmalloc (amt);
+ if (ret == (struct sw_64_elf_link_hash_table *) NULL)
+ return NULL;
+
+ if (!_bfd_elf_link_hash_table_init (&ret->root, abfd,
+ elf64_sw_64_link_hash_newfunc,
+ sizeof (struct sw_64_elf_link_hash_entry),
+ SW_64_ELF_DATA))
+ {
+ free (ret);
+ return NULL;
+ }
+
+ return &ret->root.root;
+}
+
+/* Sw_64 ELF follows MIPS ELF in using a special find_nearest_line
+ routine in order to handle the ECOFF debugging information. */
+
+struct sw_64_elf_find_line
+{
+ struct ecoff_debug_info d;
+ struct ecoff_find_line i;
+};
+
+/* We have some private fields hanging off of the elf_tdata structure. */
+
+struct sw_64_elf_obj_tdata
+{
+ struct elf_obj_tdata root;
+
+ /* For every input file, these are the got entries for that object's
+ local symbols. */
+ struct sw_64_elf_got_entry **local_got_entries;
+
+ /* For every input file, this is the object that owns the got that
+ this input file uses. */
+ bfd *gotobj;
+
+ /* For every got, this is a linked list through the objects using this got. */
+ bfd *in_got_link_next;
+
+ /* For every got, this is a link to the next got subsegment. */
+ bfd *got_link_next;
+
+ /* For every got, this is the section. */
+ asection *got;
+
+ /* For every got, this is its total number of words. */
+ int total_got_size;
+
+ /* For every got, this is the sum of the number of words required
+ to hold all of the member object's local got. */
+ int local_got_size;
+
+ /* Used by elf64_sw_64_find_nearest_line entry point. */
+ struct sw_64_elf_find_line *find_line_info;
+};
+
+#define sw_64_elf_tdata(abfd) ((struct sw_64_elf_obj_tdata *) (abfd)->tdata.any)
+
+#define is_sw_64_elf(bfd) \
+ (bfd_get_flavour (bfd) == bfd_target_elf_flavour && elf_tdata (bfd) != NULL \
+ && elf_object_id (bfd) == SW_64_ELF_DATA)
+
+static bool
+elf64_sw_64_mkobject (bfd *abfd)
+{
+ return bfd_elf_allocate_object (abfd, sizeof (struct sw_64_elf_obj_tdata),
+ SW_64_ELF_DATA);
+}
+
+/* Return the MACH for a sw_64 e_flags value. */
+unsigned long
+bfd_elf_sw_64_mach (flagword flags)
+{
+ switch (flags)
+ {
+ case E_SW_64_MACH_SW6A:
+ return bfd_mach_sw_64_sw6a;
+ case E_SW_64_MACH_SW6B:
+ return bfd_mach_sw_64_sw6b;
+ case E_SW_64_MACH_SW8A:
+ return bfd_mach_sw_64_sw8a;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static bool
+elf64_sw_64_object_p (bfd *abfd)
+{
+ /* Set the right machine number for an Sw_64 ELF file. */
+ unsigned long mach;
+
+ mach = bfd_elf_sw_64_mach (elf_elfheader (abfd)->e_flags);
+ return bfd_default_set_arch_mach (abfd, bfd_arch_sw_64, mach);
+}
+
+/* A relocation function which doesn't do anything. */
+
+static bfd_reloc_status_type
+elf64_sw_64_reloc_nil (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc,
+ asymbol *sym ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED, asection *sec,
+ bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED)
+{
+ if (output_bfd)
+ reloc->address += sec->output_offset;
+ return bfd_reloc_ok;
+}
+
+/* A relocation function used for an unsupported reloc. */
+
+static bfd_reloc_status_type
+elf64_sw_64_reloc_bad (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc,
+ asymbol *sym ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED, asection *sec,
+ bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED)
+{
+ if (output_bfd)
+ reloc->address += sec->output_offset;
+ return bfd_reloc_notsupported;
+}
+
+/* Do the work of the GPDISP relocation. */
+
+static bfd_reloc_status_type
+elf64_sw_64_do_reloc_gpdisp (bfd *abfd, bfd_vma gpdisp, bfd_byte *p_ldih,
+ bfd_byte *p_ldi)
+{
+ bfd_reloc_status_type ret = bfd_reloc_ok;
+ bfd_vma addend;
+ unsigned long i_ldih, i_ldi;
+
+ i_ldih = bfd_get_32 (abfd, p_ldih);
+ i_ldi = bfd_get_32 (abfd, p_ldi);
+
+ /* Complain if the instructions are not correct. */
+ if (((i_ldih >> 26) & 0x3f) != 0x3f || ((i_ldi >> 26) & 0x3f) != 0x3e)
+ ret = bfd_reloc_dangerous;
+
+ /* Extract the user-supplied offset, mirroring the sign extensions
+ that the instructions perform. */
+ addend = ((i_ldih & 0xffff) << 16) | (i_ldi & 0xffff);
+ addend = (addend ^ 0x80008000) - 0x80008000;
+
+ gpdisp += addend;
+
+ if ((bfd_signed_vma) gpdisp < -(bfd_signed_vma) 0x80000000
+ || (bfd_signed_vma) gpdisp >= (bfd_signed_vma) 0x7fff8000)
+ ret = bfd_reloc_overflow;
+
+ /* Compensate for the sign extension again. */
+ i_ldih = ((i_ldih & 0xffff0000)
+ | (((gpdisp >> 16) + ((gpdisp >> 15) & 1)) & 0xffff));
+ i_ldi = (i_ldi & 0xffff0000) | (gpdisp & 0xffff);
+
+ bfd_put_32 (abfd, (bfd_vma) i_ldih, p_ldih);
+ bfd_put_32 (abfd, (bfd_vma) i_ldi, p_ldi);
+
+ return ret;
+}
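+
+/* A worked example of the bias trick above (illustrative numbers
+ only): with the ldih displacement 0xffff and the ldi displacement
+ 0x8000, addend = 0xffff8000 and
+ (0xffff8000 ^ 0x80008000) - 0x80008000 = -0x18000,
+ which is the sum of the two sign-extended fields,
+ (-0x10000) + (-0x8000). On re-encoding, the ((gpdisp >> 15) & 1)
+ carry added to the high half compensates for the ldi's sign
+ extension in the same way. */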
+
+/* The special function for the GPDISP reloc. */
+
+static bfd_reloc_status_type
+elf64_sw_64_reloc_gpdisp (bfd *abfd, arelent *reloc_entry,
+ asymbol *sym ATTRIBUTE_UNUSED, void *data,
+ asection *input_section, bfd *output_bfd,
+ char **err_msg)
+{
+ bfd_reloc_status_type ret;
+ bfd_vma gp, relocation;
+ bfd_vma high_address;
+ bfd_byte *p_ldih, *p_ldi;
+
+ /* Don't do anything if we're not doing a final link. */
+ if (output_bfd)
+ {
+ reloc_entry->address += input_section->output_offset;
+ return bfd_reloc_ok;
+ }
+
+ high_address = bfd_get_section_limit (abfd, input_section);
+ if (reloc_entry->address > high_address
+ || reloc_entry->address + reloc_entry->addend > high_address)
+ return bfd_reloc_outofrange;
+
+ /* The gp used in the portion of the output object to which this
+ input object belongs is cached on the input bfd. */
+ gp = _bfd_get_gp_value (abfd);
+
+ relocation = (input_section->output_section->vma
+ + input_section->output_offset + reloc_entry->address);
+
+ p_ldih = (bfd_byte *) data + reloc_entry->address;
+ p_ldi = p_ldih + reloc_entry->addend;
+
+ ret = elf64_sw_64_do_reloc_gpdisp (abfd, gp - relocation, p_ldih, p_ldi);
+
+ /* Complain if the instructions are not correct. */
+ if (ret == bfd_reloc_dangerous)
+ *err_msg = _ ("GPDISP relocation did not find ldih and ldi instructions");
+
+ return ret;
+}
+
+/* In case we're on a 32-bit machine, construct a 64-bit "-1" value
+ from smaller values. Start with zero, widen, *then* decrement. */
+#define MINUS_ONE (((bfd_vma) 0) - 1)
+
+#define SKIP_HOWTO(N) \
+ HOWTO (N, 0, 0, 0, 0, 0, complain_overflow_dont, elf64_sw_64_reloc_bad, 0, \
+ 0, 0, 0, 0)
+
+static reloc_howto_type elf64_sw_64_howto_table[] = {
+ HOWTO (R_SW_64_NONE, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ elf64_sw_64_reloc_nil, /* special_function */
+ "NONE", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* A 32 bit reference to a symbol. */
+ HOWTO (R_SW_64_REFLONG, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "REFLONG", /* name */
+ false, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 64 bit reference to a symbol. */
+ HOWTO (R_SW_64_REFQUAD, /* type */
+ 0, /* rightshift */
+ 8, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "REFQUAD", /* name */
+ false, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 32 bit GP relative offset. This is just like REFLONG except
+ that when the value is used the value of the gp register will be
+ added in. */
+ HOWTO (R_SW_64_GPREL32, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "GPREL32", /* name */
+ false, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Used for an instruction that refers to memory off the GP register. */
+ HOWTO (R_SW_64_LITERAL, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "ELF_LITERAL", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* This reloc only appears immediately following an ELF_LITERAL reloc.
+ It identifies a use of the literal. The symbol index is special:
+ 1 means the literal address is in the base register of a memory
+ format instruction; 2 means the literal address is in the byte
+ offset register of a byte-manipulation instruction; 3 means the
+ literal address is in the target register of a jsr instruction.
+ This does not actually do any relocation. */
+ HOWTO (R_SW_64_LITUSE, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ elf64_sw_64_reloc_nil, /* special_function */
+ "LITUSE", /* name */
+ false, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Load the gp register. This is always used for a ldih instruction
+ which loads the upper 16 bits of the gp register. The symbol
+ index of the GPDISP instruction is an offset in bytes to the ldi
+ instruction that loads the lower 16 bits. The value to use for
+ the relocation is the difference between the GP value and the
+ current location; the load will always be done against a register
+ holding the current address.
+
+ NOTE: Unlike ECOFF, partial in-place relocation is not done. If
+ any offset is present in the instructions, it is an offset from
+ the register to the ldih instruction. This lets us avoid any
+ stupid hackery like inventing a gp value to do partial relocation
+ against. Also unlike ECOFF, we do the whole relocation off of
+ the GPDISP rather than a GPDISP_HI16/GPDISP_LO16 pair. The pair
+ scheme was an odd, space-consuming choice, since all the
+ information was already present in the GPDISP_HI16 reloc. */
+ HOWTO (R_SW_64_GPDISP, /* type */
+ 16, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ elf64_sw_64_reloc_gpdisp, /* special_function */
+ "GPDISP", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* A 21 bit branch. */
+ HOWTO (R_SW_64_BRADDR, /* type */
+ 2, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 21, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "BRADDR", /* name */
+ false, /* partial_inplace */
+ 0x1fffff, /* src_mask */
+ 0x1fffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* A hint for a jump to a register. */
+ HOWTO (R_SW_64_HINT, /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "HINT", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* 16 bit PC relative offset. */
+ HOWTO (R_SW_64_SREL16, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "SREL16", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* 32 bit PC relative offset. */
+ HOWTO (R_SW_64_SREL32, /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "SREL32", /* name */
+ false, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* A 64 bit PC relative offset. */
+ HOWTO (R_SW_64_SREL64, /* type */
+ 0, /* rightshift */
+ 8, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "SREL64", /* name */
+ false, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* Skip 12 - 16; deprecated ECOFF relocs. */
+ SKIP_HOWTO (12), SKIP_HOWTO (13), SKIP_HOWTO (14), SKIP_HOWTO (15),
+ SKIP_HOWTO (16),
+
+ /* The high 16 bits of the displacement from GP to the target. */
+ HOWTO (R_SW_64_GPRELHIGH, 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "GPRELHIGH", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* The low 16 bits of the displacement from GP to the target. */
+ HOWTO (R_SW_64_GPRELLOW, 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "GPRELLOW", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 16-bit displacement from the GP to the target. */
+ HOWTO (R_SW_64_GPREL16, 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "GPREL16", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Skip 20 - 23; deprecated ECOFF relocs. */
+ SKIP_HOWTO (20), SKIP_HOWTO (21), SKIP_HOWTO (22), SKIP_HOWTO (23),
+
+ /* Misc ELF relocations. */
+
+ /* A dynamic relocation to copy the target into our .dynbss section. */
+ /* Not generated, as all Sw_64 objects use PIC, so it is not needed. It
+ is present because every other ELF has one, but should not be used
+ because .dynbss is an ugly thing. */
+ HOWTO (R_SW_64_COPY, 0, 0, 0, false, 0, complain_overflow_dont,
+ bfd_elf_generic_reloc, "COPY", false, 0, 0, true),
+
+ /* A dynamic relocation for a .got entry. */
+ HOWTO (R_SW_64_GLOB_DAT, 0, 0, 0, false, 0, complain_overflow_dont,
+ bfd_elf_generic_reloc, "GLOB_DAT", false, 0, 0, true),
+
+ /* A dynamic relocation for a .plt entry. */
+ HOWTO (R_SW_64_JMP_SLOT, 0, 0, 0, false, 0, complain_overflow_dont,
+ bfd_elf_generic_reloc, "JMP_SLOT", false, 0, 0, true),
+
+ /* A dynamic relocation to add the base of the DSO to a 64-bit field. */
+ HOWTO (R_SW_64_RELATIVE, 0, 0, 0, false, 0, complain_overflow_dont,
+ bfd_elf_generic_reloc, "RELATIVE", false, 0, 0, true),
+
+ /* A 21 bit branch that adjusts for gp loads. */
+ HOWTO (R_SW_64_BRSGP, /* type */
+ 2, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 21, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "BRSGP", /* name */
+ false, /* partial_inplace */
+ 0x1fffff, /* src_mask */
+ 0x1fffff, /* dst_mask */
+ true), /* pcrel_offset */
+
+ /* Creates a tls_index for the symbol in the got. */
+ HOWTO (R_SW_64_TLSGD, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "TLSGD", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Creates a tls_index for the (current) module in the got. */
+ HOWTO (R_SW_64_TLSLDM, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "TLSLDM", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A dynamic relocation for a DTP module entry. */
+ HOWTO (R_SW_64_DTPMOD64, /* type */
+ 0, /* rightshift */
+ 8, /* size */
+ 64, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "DTPMOD64", /* name */
+ false, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Creates a 64-bit offset in the got for the displacement
+ from DTP to the target. */
+ HOWTO (R_SW_64_GOTDTPREL, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "GOTDTPREL", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A dynamic relocation for a displacement from DTP to the target. */
+ HOWTO (R_SW_64_DTPREL64, /* type */
+ 0, /* rightshift */
+ 8, /* size */
+ 64, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "DTPREL64", /* name */
+ false, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* The high 16 bits of the displacement from DTP to the target. */
+ HOWTO (R_SW_64_DTPRELHI, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "DTPRELHI", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* The low 16 bits of the displacement from DTP to the target. */
+ HOWTO (R_SW_64_DTPRELLO, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "DTPRELLO", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 16-bit displacement from DTP to the target. */
+ HOWTO (R_SW_64_DTPREL16, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "DTPREL16", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* Creates a 64-bit offset in the got for the displacement
+ from TP to the target. */
+ HOWTO (R_SW_64_GOTTPREL, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "GOTTPREL", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A dynamic relocation for a displacement from TP to the target. */
+ HOWTO (R_SW_64_TPREL64, /* type */
+ 0, /* rightshift */
+ 8, /* size */
+ 64, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "TPREL64", /* name */
+ false, /* partial_inplace */
+ MINUS_ONE, /* src_mask */
+ MINUS_ONE, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* The high 16 bits of the displacement from TP to the target. */
+ HOWTO (R_SW_64_TPRELHI, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "TPRELHI", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* The low 16 bits of the displacement from TP to the target. */
+ HOWTO (R_SW_64_TPRELLO, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "TPRELLO", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+
+ /* A 16-bit displacement from TP to the target. */
+ HOWTO (R_SW_64_TPREL16, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "TPREL16", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+ /* A 26-bit branch. */
+ HOWTO (R_SW_64_BR26ADDR, /* type */
+ 2, /* rightshift */
+ 4, /* size */
+ 26, /* bitsize */
+ true, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "BR26ADDR", /* name */
+ false, /* partial_inplace */
+ 0x3ffffff, /* src_mask */
+ 0x3ffffff, /* dst_mask */
+ true), /* pcrel_offset */
+ HOWTO (R_SW_64_LITERAL_GOT, /* type */
+ 0, /* rightshift */
+ 2, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "ELF_LITERAL_GOT", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+ HOWTO (R_SW_64_TLSREL_GOT, /* type */
+ 0, /* rightshift */
+ 1, /* size */
+ 16, /* bitsize */
+ false, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "ELF_TLSREL_GOT", /* name */
+ false, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ false), /* pcrel_offset */
+};
+
+/* A mapping from BFD reloc types to Sw_64 ELF reloc types. */
+
+struct elf_reloc_map
+{
+ bfd_reloc_code_real_type bfd_reloc_val;
+ int elf_reloc_val;
+};
+
+static const struct elf_reloc_map elf64_sw_64_reloc_map[] = {
+ {BFD_RELOC_NONE, R_SW_64_NONE},
+ {BFD_RELOC_32, R_SW_64_REFLONG},
+ {BFD_RELOC_64, R_SW_64_REFQUAD},
+ {BFD_RELOC_CTOR, R_SW_64_REFQUAD},
+ {BFD_RELOC_GPREL32, R_SW_64_GPREL32},
+ {BFD_RELOC_SW_64_ELF_LITERAL, R_SW_64_LITERAL},
+ {BFD_RELOC_SW_64_LITUSE, R_SW_64_LITUSE},
+ {BFD_RELOC_SW_64_GPDISP, R_SW_64_GPDISP},
+ {BFD_RELOC_23_PCREL_S2, R_SW_64_BRADDR},
+ {BFD_RELOC_SW_64_HINT, R_SW_64_HINT},
+ {BFD_RELOC_16_PCREL, R_SW_64_SREL16},
+ {BFD_RELOC_32_PCREL, R_SW_64_SREL32},
+ {BFD_RELOC_64_PCREL, R_SW_64_SREL64},
+ {BFD_RELOC_SW_64_GPREL_HI16, R_SW_64_GPRELHIGH},
+ {BFD_RELOC_SW_64_GPREL_LO16, R_SW_64_GPRELLOW},
+ {BFD_RELOC_GPREL16, R_SW_64_GPREL16},
+ {BFD_RELOC_SW_64_BRSGP, R_SW_64_BRSGP},
+ {BFD_RELOC_SW_64_TLSGD, R_SW_64_TLSGD},
+ {BFD_RELOC_SW_64_TLSLDM, R_SW_64_TLSLDM},
+ {BFD_RELOC_SW_64_DTPMOD64, R_SW_64_DTPMOD64},
+ {BFD_RELOC_SW_64_GOTDTPREL16, R_SW_64_GOTDTPREL},
+ {BFD_RELOC_SW_64_DTPREL64, R_SW_64_DTPREL64},
+ {BFD_RELOC_SW_64_DTPREL_HI16, R_SW_64_DTPRELHI},
+ {BFD_RELOC_SW_64_DTPREL_LO16, R_SW_64_DTPRELLO},
+ {BFD_RELOC_SW_64_DTPREL16, R_SW_64_DTPREL16},
+ {BFD_RELOC_SW_64_GOTTPREL16, R_SW_64_GOTTPREL},
+ {BFD_RELOC_SW_64_TPREL64, R_SW_64_TPREL64},
+ {BFD_RELOC_SW_64_TPREL_HI16, R_SW_64_TPRELHI},
+ {BFD_RELOC_SW_64_TPREL_LO16, R_SW_64_TPRELLO},
+ {BFD_RELOC_SW_64_TPREL16, R_SW_64_TPREL16},
+ {BFD_RELOC_SW_64_BR26, R_SW_64_BR26ADDR},
+ {BFD_RELOC_SW_64_ELF_LITERAL_GOT, R_SW_64_LITERAL_GOT},
+ {BFD_RELOC_SW_64_TLSREL_GOT, R_SW_64_TLSREL_GOT},
+};
+
+/* Given a BFD reloc type, return a HOWTO structure. */
+
+static reloc_howto_type *
+elf64_sw_64_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ bfd_reloc_code_real_type code)
+{
+ const struct elf_reloc_map *i, *e;
+ i = e = elf64_sw_64_reloc_map;
+ e += sizeof (elf64_sw_64_reloc_map) / sizeof (struct elf_reloc_map);
+ for (; i != e; ++i)
+ {
+ if (i->bfd_reloc_val == code)
+ return &elf64_sw_64_howto_table[i->elf_reloc_val];
+ }
+ return NULL;
+}
+
+static reloc_howto_type *
+elf64_sw_64_bfd_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ const char *r_name)
+{
+ unsigned int i;
+
+ for (i = 0; i < (sizeof (elf64_sw_64_howto_table)
+ / sizeof (elf64_sw_64_howto_table[0]));
+ i++)
+ if (elf64_sw_64_howto_table[i].name != NULL
+ && strcasecmp (elf64_sw_64_howto_table[i].name, r_name) == 0)
+ return &elf64_sw_64_howto_table[i];
+
+ return NULL;
+}
+
+/* Given an Sw_64 ELF reloc type, fill in an arelent structure. */
+
+static bool
+elf64_sw_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
+ Elf_Internal_Rela *dst)
+{
+ unsigned r_type = ELF64_R_TYPE (dst->r_info);
+
+ if (r_type >= R_SW_64_max)
+ {
+ /* xgettext:c-format */
+ _bfd_error_handler (_ ("%pB: unsupported relocation type %#x"), abfd,
+ r_type);
+ bfd_set_error (bfd_error_bad_value);
+ return false;
+ }
+ cache_ptr->howto = &elf64_sw_64_howto_table[r_type];
+ return true;
+}
+
+/* These two relocations create a two-word entry in the got. */
+#define sw_64_got_entry_size(r_type) \
+ (r_type == R_SW_64_TLSGD || r_type == R_SW_64_TLSLDM ? 16 : 8)
+
+/* This is PT_TLS segment p_vaddr. */
+#define sw_64_get_dtprel_base(info) (elf_hash_table (info)->tls_sec->vma)
+
+/* Main program TLS (whose template starts at PT_TLS p_vaddr)
+ is assigned offset round (16, PT_TLS p_align). */
+#define sw_64_get_tprel_base(info) \
+ (elf_hash_table (info)->tls_sec->vma \
+ - align_power ((bfd_vma) 16, \
+ elf_hash_table (info)->tls_sec->alignment_power))
+
+/* Handle an Sw_64 specific section when reading an object file. This
+ is called when bfd_section_from_shdr finds a section with an unknown
+ type. */
+
+static bool
+elf64_sw_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
+ const char *name, int shindex)
+{
+ asection *newsect;
+
+ /* There ought to be a place to keep ELF backend specific flags, but
+ at the moment there isn't one. We just keep track of the
+ sections by their name, instead. Fortunately, the ABI gives
+ suggested names for all the MIPS specific sections, so we will
+ probably get away with this. */
+ switch (hdr->sh_type)
+ {
+ case SHT_SW_64_DEBUG:
+ if (strcmp (name, ".mdebug") != 0)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
+ return false;
+ newsect = hdr->bfd_section;
+
+ if (hdr->sh_type == SHT_SW_64_DEBUG)
+ {
+ if (!bfd_set_section_flags (newsect,
+ bfd_section_flags (newsect) | SEC_DEBUGGING))
+ return false;
+ }
+
+ return true;
+}
+
+/* Convert Sw_64 specific section flags to bfd internal section flags. */
+
+static bool
+elf64_sw_64_section_flags (const Elf_Internal_Shdr *hdr)
+{
+ if (hdr->sh_flags & SHF_SW_64_GPREL)
+ hdr->bfd_section->flags |= SEC_SMALL_DATA;
+
+ return true;
+}
+
+/* Set the correct type for an Sw_64 ELF section. We do this by the
+ section name, which is a hack, but ought to work. */
+
+static bool
+elf64_sw_64_fake_sections (bfd *abfd, Elf_Internal_Shdr *hdr, asection *sec)
+{
+ const char *name;
+
+ name = bfd_section_name (sec);
+
+ if (strcmp (name, ".mdebug") == 0)
+ {
+ hdr->sh_type = SHT_SW_64_DEBUG;
+ /* In a shared object on Irix 5.3, the .mdebug section has an
+ entsize of 0. FIXME: Does this matter? */
+ if ((abfd->flags & DYNAMIC) != 0)
+ hdr->sh_entsize = 0;
+ else
+ hdr->sh_entsize = 1;
+ }
+ else if ((sec->flags & SEC_SMALL_DATA) || strcmp (name, ".sdata") == 0
+ || strcmp (name, ".sbss") == 0 || strcmp (name, ".lit4") == 0
+ || strcmp (name, ".lit8") == 0)
+ hdr->sh_flags |= SHF_SW_64_GPREL;
+
+ return true;
+}
+
+/* Hook called by the linker routine which adds symbols from an object
+ file. We use it to put .comm items in .sbss, and not .bss. */
+
+static bool
+elf64_sw_64_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
+ Elf_Internal_Sym *sym,
+ const char **namep ATTRIBUTE_UNUSED,
+ flagword *flagsp ATTRIBUTE_UNUSED, asection **secp,
+ bfd_vma *valp)
+{
+ if (sym->st_shndx == SHN_COMMON && !bfd_link_relocatable (info)
+ && sym->st_size <= elf_gp_size (abfd))
+ {
+ /* Common symbols less than or equal to -G nn bytes are
+ automatically put into .sbss. */
+
+ asection *scomm = bfd_get_section_by_name (abfd, ".scommon");
+
+ if (scomm == NULL)
+ {
+ scomm = bfd_make_section_with_flags (abfd, ".scommon",
+ (SEC_ALLOC | SEC_IS_COMMON
+ | SEC_SMALL_DATA
+ | SEC_LINKER_CREATED));
+ if (scomm == NULL)
+ return false;
+ }
+
+ *secp = scomm;
+ *valp = sym->st_size;
+ }
+
+ return true;
+}
+
+/* Create the .got section. */
+
+static bool
+elf64_sw_64_create_got_section (bfd *abfd,
+ struct bfd_link_info *info ATTRIBUTE_UNUSED)
+{
+ flagword flags;
+ asection *s;
+
+ if (!is_sw_64_elf (abfd))
+ return false;
+
+ flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED);
+ s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
+ if (s == NULL || !bfd_set_section_alignment (s, 3))
+ return false;
+
+ sw_64_elf_tdata (abfd)->got = s;
+
+ /* Make sure the object's gotobj is set to itself so that we default
+ to every object with its own .got. We'll merge .gots later once
+ we've collected each object's info. */
+ sw_64_elf_tdata (abfd)->gotobj = abfd;
+
+ return true;
+}
+
+/* Create all the dynamic sections. */
+
+static bool
+elf64_sw_64_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info)
+{
+ asection *s;
+ flagword flags;
+ struct elf_link_hash_entry *h;
+
+ if (!is_sw_64_elf (abfd))
+ return false;
+
+ /* We need to create .plt, .rela.plt, .got, and .rela.got sections. */
+
+ flags
+ = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED | (elf64_sw_64_use_secureplt ? SEC_READONLY : 0));
+ s = bfd_make_section_anyway_with_flags (abfd, ".plt", flags);
+ elf_hash_table (info)->splt = s;
+ if (s == NULL || !bfd_set_section_alignment (s, 4))
+ return false;
+
+ /* Define the symbol _PROCEDURE_LINKAGE_TABLE_ at the start of the
+ .plt section. */
+ h = _bfd_elf_define_linkage_sym (abfd, info, s, "_PROCEDURE_LINKAGE_TABLE_");
+ elf_hash_table (info)->hplt = h;
+ if (h == NULL)
+ return false;
+
+ flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED | SEC_READONLY);
+ s = bfd_make_section_anyway_with_flags (abfd, ".rela.plt", flags);
+ elf_hash_table (info)->srelplt = s;
+ if (s == NULL || !bfd_set_section_alignment (s, 3))
+ return false;
+
+ if (elf64_sw_64_use_secureplt)
+ {
+ flags = SEC_ALLOC | SEC_LINKER_CREATED;
+ s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
+ elf_hash_table (info)->sgotplt = s;
+ if (s == NULL || !bfd_set_section_alignment (s, 3))
+ return false;
+ }
+
+ /* We may or may not have created a .got section for this object, but
+ we definitely haven't done the rest of the work. */
+
+ if (sw_64_elf_tdata (abfd)->gotobj == NULL)
+ {
+ if (!elf64_sw_64_create_got_section (abfd, info))
+ return false;
+ }
+
+ flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED | SEC_READONLY);
+ s = bfd_make_section_anyway_with_flags (abfd, ".rela.got", flags);
+ elf_hash_table (info)->srelgot = s;
+ if (s == NULL || !bfd_set_section_alignment (s, 3))
+ return false;
+
+ /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
+ dynobj's .got section. We don't do this in the linker script
+ because we don't want to define the symbol if we are not creating
+ a global offset table. */
+ h = _bfd_elf_define_linkage_sym (abfd, info, sw_64_elf_tdata (abfd)->got,
+ "_GLOBAL_OFFSET_TABLE_");
+ elf_hash_table (info)->hgot = h;
+ if (h == NULL)
+ return false;
+
+ return true;
+}
+
+/* Read ECOFF debugging information from a .mdebug section into a
+ ecoff_debug_info structure. */
+
+static bool
+elf64_sw_64_read_ecoff_info (bfd *abfd, asection *section,
+ struct ecoff_debug_info *debug)
+{
+ HDRR *symhdr;
+ const struct ecoff_debug_swap *swap;
+ char *ext_hdr = NULL;
+
+ swap = get_elf_backend_data (abfd)->elf_backend_ecoff_debug_swap;
+ memset (debug, 0, sizeof (*debug));
+
+ ext_hdr = (char *) bfd_malloc (swap->external_hdr_size);
+ if (ext_hdr == NULL && swap->external_hdr_size != 0)
+ goto error_return;
+
+ if (!bfd_get_section_contents (abfd, section, ext_hdr, (file_ptr) 0,
+ swap->external_hdr_size))
+ goto error_return;
+
+ symhdr = &debug->symbolic_header;
+ (*swap->swap_hdr_in) (abfd, ext_hdr, symhdr);
+
+ /* The symbolic header contains absolute file offsets and sizes to
+ read. */
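+ /* READ (ptr, offset, count, size, type): allocate COUNT items of SIZE
+ bytes, read them from the file position given by the OFFSET field of
+ the symbolic header, and store the buffer in debug->PTR cast to TYPE.
+ A zero count simply leaves debug->PTR NULL. */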
+#define READ(ptr, offset, count, size, type) \
+ if (symhdr->count == 0) \
+ debug->ptr = NULL; \
+ else \
+ { \
+ bfd_size_type amt = (bfd_size_type) size * symhdr->count; \
+ debug->ptr = (type) bfd_malloc (amt); \
+ if (debug->ptr == NULL) \
+ goto error_return; \
+ if (bfd_seek (abfd, (file_ptr) symhdr->offset, SEEK_SET) != 0 \
+ || bfd_bread (debug->ptr, amt, abfd) != amt) \
+ goto error_return; \
+ }
+
+ READ (line, cbLineOffset, cbLine, sizeof (unsigned char), unsigned char *);
+ READ (external_dnr, cbDnOffset, idnMax, swap->external_dnr_size, void *);
+ READ (external_pdr, cbPdOffset, ipdMax, swap->external_pdr_size, void *);
+ READ (external_sym, cbSymOffset, isymMax, swap->external_sym_size, void *);
+ READ (external_opt, cbOptOffset, ioptMax, swap->external_opt_size, void *);
+ READ (external_aux, cbAuxOffset, iauxMax, sizeof (union aux_ext),
+ union aux_ext *);
+ READ (ss, cbSsOffset, issMax, sizeof (char), char *);
+ READ (ssext, cbSsExtOffset, issExtMax, sizeof (char), char *);
+ READ (external_fdr, cbFdOffset, ifdMax, swap->external_fdr_size, void *);
+ READ (external_rfd, cbRfdOffset, crfd, swap->external_rfd_size, void *);
+ READ (external_ext, cbExtOffset, iextMax, swap->external_ext_size, void *);
+#undef READ
+
+ debug->fdr = NULL;
+
+ return true;
+
+error_return:
+ free (ext_hdr);
+ _bfd_ecoff_free_ecoff_debug_info (debug);
+ return false;
+}
+
+/* Sw_64 ELF local labels start with '$'. */
+
+static bool
+elf64_sw_64_is_local_label_name (bfd *abfd ATTRIBUTE_UNUSED, const char *name)
+{
+ return name[0] == '$';
+}
+
+static bool
+elf64_sw_64_find_nearest_line (bfd *abfd, asymbol **symbols, asection *section,
+ bfd_vma offset, const char **filename_ptr,
+ const char **functionname_ptr,
+ unsigned int *line_ptr,
+ unsigned int *discriminator_ptr)
+{
+ asection *msec;
+
+ if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
+ filename_ptr, functionname_ptr, line_ptr,
+ discriminator_ptr, dwarf_debug_sections,
+ &elf_tdata (abfd)->dwarf2_find_line_info)
+ == 1)
+ return true;
+
+ msec = bfd_get_section_by_name (abfd, ".mdebug");
+ if (msec != NULL)
+ {
+ flagword origflags;
+ struct sw_64_elf_find_line *fi;
+ const struct ecoff_debug_swap *const swap
+ = get_elf_backend_data (abfd)->elf_backend_ecoff_debug_swap;
+
+ /* If we are called during a link, sw_64_elf_final_link may have
+ cleared the SEC_HAS_CONTENTS field. We force it back on here
+ if appropriate (which it normally will be). */
+ origflags = msec->flags;
+ if (elf_section_data (msec)->this_hdr.sh_type != SHT_NOBITS)
+ msec->flags |= SEC_HAS_CONTENTS;
+
+ fi = sw_64_elf_tdata (abfd)->find_line_info;
+ if (fi == NULL)
+ {
+ bfd_size_type external_fdr_size;
+ char *fraw_src;
+ char *fraw_end;
+ struct fdr *fdr_ptr;
+ bfd_size_type amt = sizeof (struct sw_64_elf_find_line);
+
+ fi = (struct sw_64_elf_find_line *) bfd_zalloc (abfd, amt);
+ if (fi == NULL)
+ {
+ msec->flags = origflags;
+ return false;
+ }
+
+ if (!elf64_sw_64_read_ecoff_info (abfd, msec, &fi->d))
+ {
+ msec->flags = origflags;
+ return false;
+ }
+
+ /* Swap in the FDR information. */
+ amt = fi->d.symbolic_header.ifdMax * sizeof (struct fdr);
+ fi->d.fdr = (struct fdr *) bfd_alloc (abfd, amt);
+ if (fi->d.fdr == NULL)
+ {
+ msec->flags = origflags;
+ return false;
+ }
+ external_fdr_size = swap->external_fdr_size;
+ fdr_ptr = fi->d.fdr;
+ fraw_src = (char *) fi->d.external_fdr;
+ fraw_end
+ = (fraw_src + fi->d.symbolic_header.ifdMax * external_fdr_size);
+ for (; fraw_src < fraw_end; fraw_src += external_fdr_size, fdr_ptr++)
+ (*swap->swap_fdr_in) (abfd, fraw_src, fdr_ptr);
+
+ sw_64_elf_tdata (abfd)->find_line_info = fi;
+
+ /* Note that we don't bother to ever free this information.
+ find_nearest_line is either called all the time, as in
+ objdump -l, so the information should be saved, or it is
+ rarely called, as in ld error messages, so the memory
+ wasted is unimportant. Still, it would probably be a
+ good idea for free_cached_info to throw it away. */
+ }
+
+ if (_bfd_ecoff_locate_line (abfd, section, offset, &fi->d, swap, &fi->i,
+ filename_ptr, functionname_ptr, line_ptr))
+ {
+ msec->flags = origflags;
+ return true;
+ }
+
+ msec->flags = origflags;
+ }
+
+ /* Fall back on the generic ELF find_nearest_line routine. */
+
+ return _bfd_elf_find_nearest_line (abfd, symbols, section, offset,
+ filename_ptr, functionname_ptr, line_ptr,
+ discriminator_ptr);
+}
+
+/* Structure used to pass information to sw_64_elf_output_extsym. */
+
+struct extsym_info
+{
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct ecoff_debug_info *debug;
+ const struct ecoff_debug_swap *swap;
+ bool failed;
+};
+
+static bool
+elf64_sw_64_output_extsym (struct sw_64_elf_link_hash_entry *h, void *data)
+{
+ struct extsym_info *einfo = (struct extsym_info *) data;
+ bool strip;
+ asection *sec, *output_section;
+
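+ /* Decide whether this symbol should be omitted from the ECOFF
+ symbolic (.mdebug) information being generated. */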
+ if (h->root.indx == -2)
+ strip = false;
+ else if ((h->root.def_dynamic || h->root.ref_dynamic
+ || h->root.root.type == bfd_link_hash_new)
+ && !h->root.def_regular && !h->root.ref_regular)
+ strip = true;
+ else if (einfo->info->strip == strip_all
+ || (einfo->info->strip == strip_some
+ && bfd_hash_lookup (einfo->info->keep_hash,
+ h->root.root.root.string, false, false)
+ == NULL))
+ strip = true;
+ else
+ strip = false;
+
+ if (strip)
+ return true;
+
+ if (h->esym.ifd == -2)
+ {
+ h->esym.jmptbl = 0;
+ h->esym.cobol_main = 0;
+ h->esym.weakext = 0;
+ h->esym.reserved = 0;
+ h->esym.ifd = ifdNil;
+ h->esym.asym.value = 0;
+ h->esym.asym.st = stGlobal;
+
+ if (h->root.root.type != bfd_link_hash_defined
+ && h->root.root.type != bfd_link_hash_defweak)
+ h->esym.asym.sc = scAbs;
+ else
+ {
+ const char *name;
+
+ sec = h->root.root.u.def.section;
+ output_section = sec->output_section;
+
+ /* When making a shared library and symbol h is defined in
+ another shared library, OUTPUT_SECTION may be null. */
+ if (output_section == NULL)
+ h->esym.asym.sc = scUndefined;
+ else
+ {
+ name = bfd_section_name (output_section);
+
+ if (strcmp (name, ".text") == 0)
+ h->esym.asym.sc = scText;
+ else if (strcmp (name, ".data") == 0)
+ h->esym.asym.sc = scData;
+ else if (strcmp (name, ".sdata") == 0)
+ h->esym.asym.sc = scSData;
+ else if (strcmp (name, ".rodata") == 0
+ || strcmp (name, ".rdata") == 0)
+ h->esym.asym.sc = scRData;
+ else if (strcmp (name, ".bss") == 0)
+ h->esym.asym.sc = scBss;
+ else if (strcmp (name, ".sbss") == 0)
+ h->esym.asym.sc = scSBss;
+ else if (strcmp (name, ".init") == 0)
+ h->esym.asym.sc = scInit;
+ else if (strcmp (name, ".fini") == 0)
+ h->esym.asym.sc = scFini;
+ else
+ h->esym.asym.sc = scAbs;
+ }
+ }
+
+ h->esym.asym.reserved = 0;
+ h->esym.asym.index = indexNil;
+ }
+
+ if (h->root.root.type == bfd_link_hash_common)
+ h->esym.asym.value = h->root.root.u.c.size;
+ else if (h->root.root.type == bfd_link_hash_defined
+ || h->root.root.type == bfd_link_hash_defweak)
+ {
+ if (h->esym.asym.sc == scCommon)
+ h->esym.asym.sc = scBss;
+ else if (h->esym.asym.sc == scSCommon)
+ h->esym.asym.sc = scSBss;
+
+ sec = h->root.root.u.def.section;
+ output_section = sec->output_section;
+ if (output_section != NULL)
+ h->esym.asym.value = (h->root.root.u.def.value + sec->output_offset
+ + output_section->vma);
+ else
+ h->esym.asym.value = 0;
+ }
+
+ if (!bfd_ecoff_debug_one_external (einfo->abfd, einfo->debug, einfo->swap,
+ h->root.root.root.string, &h->esym))
+ {
+ einfo->failed = true;
+ return false;
+ }
+
+ return true;
+}
+
+/* Search for and possibly create a got entry. */
+
+static struct sw_64_elf_got_entry *
+get_got_entry (bfd *abfd, struct sw_64_elf_link_hash_entry *h,
+ unsigned long r_type, unsigned long r_symndx, bfd_vma r_addend)
+{
+ struct sw_64_elf_got_entry *gotent;
+ struct sw_64_elf_got_entry **slot;
+
+ if (h)
+ slot = &h->got_entries;
+ else
+ {
+ /* This is a local .got entry -- record for merge. */
+
+ struct sw_64_elf_got_entry **local_got_entries;
+
+ local_got_entries = sw_64_elf_tdata (abfd)->local_got_entries;
+ if (!local_got_entries)
+ {
+ bfd_size_type size;
+ Elf_Internal_Shdr *symtab_hdr;
+
+ symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
+ size = symtab_hdr->sh_info;
+ size *= sizeof (struct sw_64_elf_got_entry *);
+
+ local_got_entries
+ = (struct sw_64_elf_got_entry **) bfd_zalloc (abfd, size);
+ if (!local_got_entries)
+ return NULL;
+
+ sw_64_elf_tdata (abfd)->local_got_entries = local_got_entries;
+ }
+
+ slot = &local_got_entries[r_symndx];
+ }
+
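+ /* Reuse an existing entry only if the GOT object, reloc type and
+ addend all match; otherwise allocate a fresh one below. */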
+ for (gotent = *slot; gotent; gotent = gotent->next)
+ if (gotent->gotobj == abfd && gotent->reloc_type == r_type
+ && gotent->addend == r_addend)
+ break;
+
+ if (!gotent)
+ {
+ int entry_size;
+ size_t amt;
+
+ amt = sizeof (struct sw_64_elf_got_entry);
+ gotent = (struct sw_64_elf_got_entry *) bfd_alloc (abfd, amt);
+ if (!gotent)
+ return NULL;
+
+ gotent->gotobj = abfd;
+ gotent->addend = r_addend;
+ gotent->got_offset = -1;
+ gotent->plt_offset = -1;
+ gotent->use_count = 1;
+ gotent->reloc_type = r_type;
+ gotent->reloc_done = 0;
+ gotent->reloc_xlated = 0;
+
+ gotent->next = *slot;
+ *slot = gotent;
+
+ entry_size = sw_64_got_entry_size (r_type);
+ sw_64_elf_tdata (abfd)->total_got_size += entry_size;
+ if (!h)
+ sw_64_elf_tdata (abfd)->local_got_size += entry_size;
+ }
+ else
+ gotent->use_count += 1;
+
+ return gotent;
+}
+
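+ /* Decide whether AH wants a .plt entry: it must look callable
+ (STT_FUNC, or an undefined/weak-undefined symbol), every recorded
+ literal use must be a call (LU_PLT), and there must be no other
+ kinds of use. */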
+static bool
+elf64_sw_64_want_plt (struct sw_64_elf_link_hash_entry *ah)
+{
+ return ((ah->root.type == STT_FUNC
+ || ah->root.root.type == bfd_link_hash_undefweak
+ || ah->root.root.type == bfd_link_hash_undefined)
+ && (ah->flags & SW_64_ELF_LINK_HASH_LU_PLT) != 0
+ && (ah->flags & ~SW_64_ELF_LINK_HASH_LU_PLT) == 0);
+}
+
+/* Whether to sort relocs output by ld -r or ld --emit-relocs, by r_offset.
+ Don't do so for code sections. We want to keep ordering of LITERAL/LITUSE
+ as is. On the other hand, elf-eh-frame.c processing requires .eh_frame
+ relocs to be sorted. */
+
+static bool
+elf64_sw_64_sort_relocs_p (asection *sec)
+{
+ return (sec->flags & SEC_CODE) == 0;
+}
+
+/* Handle dynamic relocations when doing an Sw_64 ELF link. */
+
+static bool
+elf64_sw_64_check_relocs (bfd *abfd, struct bfd_link_info *info, asection *sec,
+ const Elf_Internal_Rela *relocs)
+{
+ bfd *dynobj;
+ asection *sreloc;
+ Elf_Internal_Shdr *symtab_hdr;
+ struct sw_64_elf_link_hash_entry **sym_hashes;
+ const Elf_Internal_Rela *rel, *relend;
+
+ if (bfd_link_relocatable (info))
+ return true;
+
+ /* Don't do anything special with non-loaded, non-alloced sections.
+ In particular, any relocs in such sections should not affect GOT
+ and PLT reference counting (i.e. we don't allow them to create GOT
+ or PLT entries), there's no possibility or desire to optimize TLS
+ relocs, and there's not much point in propagating relocs to shared
+ libs that the dynamic linker won't relocate. */
+ if ((sec->flags & SEC_ALLOC) == 0)
+ return true;
+
+ BFD_ASSERT (is_sw_64_elf (abfd));
+
+ dynobj = elf_hash_table (info)->dynobj;
+ if (dynobj == NULL)
+ elf_hash_table (info)->dynobj = dynobj = abfd;
+
+ sreloc = NULL;
+ symtab_hdr = &elf_symtab_hdr (abfd);
+ sym_hashes = sw_64_elf_sym_hashes (abfd);
+
+ relend = relocs + sec->reloc_count;
+ for (rel = relocs; rel < relend; ++rel)
+ {
+ enum
+ {
+ NEED_GOT = 1,
+ NEED_GOT_ENTRY = 2,
+ NEED_DYNREL = 4
+ };
+
+ unsigned long r_symndx, r_type;
+ struct sw_64_elf_link_hash_entry *h;
+ unsigned int gotent_flags;
+ bool maybe_dynamic;
+ unsigned int need;
+ bfd_vma addend;
+
+ r_symndx = ELF64_R_SYM (rel->r_info);
+ if (r_symndx < symtab_hdr->sh_info)
+ h = NULL;
+ else
+ {
+ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
+
+ while (h->root.root.type == bfd_link_hash_indirect
+ || h->root.root.type == bfd_link_hash_warning)
+ h = (struct sw_64_elf_link_hash_entry *) h->root.root.u.i.link;
+
+ /* PR15323, ref flags aren't set for references in the same
+ object. */
+ h->root.ref_regular = 1;
+ }
+
+ /* We can only get preliminary data on whether a symbol is
+ locally or externally defined, as not all of the input files
+ have yet been processed. Do something with what we know, as
+ this may help reduce memory usage and processing time later. */
+ maybe_dynamic = false;
+ if (h
+ && ((bfd_link_pic (info)
+ && (!info->symbolic
+ || info->unresolved_syms_in_shared_libs == RM_IGNORE))
+ || !h->root.def_regular
+ || h->root.root.type == bfd_link_hash_defweak))
+ maybe_dynamic = true;
+
+ need = 0;
+ gotent_flags = 0;
+ r_type = ELF64_R_TYPE (rel->r_info);
+ addend = rel->r_addend;
+
+ switch (r_type)
+ {
+ case R_SW_64_LITERAL:
+ need = NEED_GOT | NEED_GOT_ENTRY;
+
+ /* Remember how this literal is used from its LITUSEs.
+ This will be important when it comes to decide if we can
+ create a .plt entry for a function symbol. */
+ while (++rel < relend && ELF64_R_TYPE (rel->r_info) == R_SW_64_LITUSE)
+ if (rel->r_addend >= 1 && rel->r_addend <= 6)
+ gotent_flags |= 1 << rel->r_addend;
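+ /* Step back one so the outer loop's increment leaves us on the
+ first reloc that is not a LITUSE. */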
+ --rel;
+
+ /* No LITUSEs -- presumably the address is used somehow. */
+ if (gotent_flags == 0)
+ gotent_flags = SW_64_ELF_LINK_HASH_LU_ADDR;
+ break;
+
+ case R_SW_64_GPDISP:
+ case R_SW_64_GPREL16:
+ case R_SW_64_GPREL32:
+ case R_SW_64_GPRELHIGH:
+ case R_SW_64_GPRELLOW:
+ case R_SW_64_BRSGP:
+ need = NEED_GOT;
+ break;
+
+ case R_SW_64_REFLONG:
+ case R_SW_64_REFQUAD:
+ if (bfd_link_pic (info) || maybe_dynamic)
+ need = NEED_DYNREL;
+ break;
+
+ case R_SW_64_TLSLDM:
+ /* The symbol for a TLSLDM reloc is ignored. Collapse the
+ reloc to the STN_UNDEF (0) symbol so that they all match. */
+ r_symndx = STN_UNDEF;
+ h = 0;
+ maybe_dynamic = false;
+ /* FALLTHRU */
+
+ case R_SW_64_TLSGD:
+ case R_SW_64_GOTDTPREL:
+ need = NEED_GOT | NEED_GOT_ENTRY;
+ break;
+
+ case R_SW_64_GOTTPREL:
+ need = NEED_GOT | NEED_GOT_ENTRY;
+ gotent_flags = SW_64_ELF_LINK_HASH_TLS_IE;
+ if (bfd_link_pic (info))
+ info->flags |= DF_STATIC_TLS;
+ break;
+
+ case R_SW_64_TPREL64:
+ if (bfd_link_dll (info))
+ {
+ info->flags |= DF_STATIC_TLS;
+ need = NEED_DYNREL;
+ }
+ else if (maybe_dynamic)
+ need = NEED_DYNREL;
+ break;
+ }
+
+ if (need & NEED_GOT)
+ {
+ if (sw_64_elf_tdata (abfd)->gotobj == NULL)
+ {
+ if (!elf64_sw_64_create_got_section (abfd, info))
+ return false;
+ }
+ }
+
+ if (need & NEED_GOT_ENTRY)
+ {
+ struct sw_64_elf_got_entry *gotent;
+
+ gotent = get_got_entry (abfd, h, r_type, r_symndx, addend);
+ if (!gotent)
+ return false;
+
+ if (gotent_flags)
+ {
+ gotent->flags |= gotent_flags;
+ if (h)
+ {
+ gotent_flags |= h->flags;
+ h->flags = gotent_flags;
+
+ /* Make a guess as to whether a .plt entry is needed. */
+ /* ??? It appears that we won't make it into
+ adjust_dynamic_symbol for symbols that remain
+ totally undefined. Copying this check here means
+ we can create a plt entry for them too. */
+ h->root.needs_plt
+ = (maybe_dynamic && elf64_sw_64_want_plt (h));
+ }
+ }
+ }
+
+ if (need & NEED_DYNREL)
+ {
+ /* We need to create the section here now whether we eventually
+ use it or not so that it gets mapped to an output section by
+ the linker. If not used, we'll kill it in size_dynamic_sections.
+ */
+ if (sreloc == NULL)
+ {
+ sreloc
+ = _bfd_elf_make_dynamic_reloc_section (sec, dynobj, 3, abfd,
+ /*rela?*/ true);
+
+ if (sreloc == NULL)
+ return false;
+ }
+
+ if (h)
+ {
+ /* Since we haven't seen all of the input symbols yet, we
+ don't know whether we'll actually need a dynamic relocation
+ entry for this reloc. So make a record of it. Once we
+ find out if this thing needs dynamic relocation we'll
+ expand the relocation sections by the appropriate amount. */
+
+ struct sw_64_elf_reloc_entry *rent;
+
+ for (rent = h->reloc_entries; rent; rent = rent->next)
+ if (rent->rtype == r_type && rent->srel == sreloc)
+ break;
+
+ if (!rent)
+ {
+ size_t amt = sizeof (struct sw_64_elf_reloc_entry);
+ rent = (struct sw_64_elf_reloc_entry *) bfd_alloc (abfd, amt);
+ if (!rent)
+ return false;
+
+ rent->srel = sreloc;
+ rent->sec = sec;
+ rent->rtype = r_type;
+ rent->count = 1;
+
+ rent->next = h->reloc_entries;
+ h->reloc_entries = rent;
+ }
+ else
+ rent->count++;
+ }
+ else if (bfd_link_pic (info))
+ {
+ /* If this is a shared library, and the section is to be
+ loaded into memory, we need a RELATIVE reloc. */
+ sreloc->size += sizeof (Elf64_External_Rela);
+ if (sec->flags & SEC_READONLY)
+ {
+ info->flags |= DF_TEXTREL;
+ info->callbacks->minfo (
+ _ ("%pB: dynamic relocation against `T' in "
+ "read-only section `%pA'\n"),
+ sec->owner, sec);
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/* Return the section that should be marked against GC for a given
+ relocation. */
+
+static asection *
+elf64_sw_64_gc_mark_hook (asection *sec, struct bfd_link_info *info,
+ Elf_Internal_Rela *rel, struct elf_link_hash_entry *h,
+ Elf_Internal_Sym *sym)
+{
+ /* These relocations don't really reference a symbol. Instead we store
+ extra data in their addend slot. Ignore the symbol. */
+ switch (ELF64_R_TYPE (rel->r_info))
+ {
+ case R_SW_64_LITUSE:
+ case R_SW_64_GPDISP:
+ case R_SW_64_HINT:
+ return NULL;
+ }
+
+ return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
+}
+
+/* Adjust a symbol defined by a dynamic object and referenced by a
+ regular object. The current definition is in some section of the
+ dynamic object, but we're not including those sections. We have to
+ change the definition to something the rest of the link can
+ understand. */
+
+static bool
+elf64_sw_64_adjust_dynamic_symbol (struct bfd_link_info *info,
+ struct elf_link_hash_entry *h)
+{
+ bfd *dynobj;
+ asection *s;
+ struct sw_64_elf_link_hash_entry *ah;
+
+ dynobj = elf_hash_table (info)->dynobj;
+ ah = (struct sw_64_elf_link_hash_entry *) h;
+
+ /* Now that we've seen all of the input symbols, finalize our decision
+ about whether this symbol should get a .plt entry. Irritatingly, it
+ is common for folk to leave undefined symbols in shared libraries,
+ and they still expect lazy binding; accept undefined symbols in lieu
+ of STT_FUNC. */
+ if (sw_64_elf_dynamic_symbol_p (h, info) && elf64_sw_64_want_plt (ah))
+ {
+ h->needs_plt = true;
+
+ s = elf_hash_table (info)->splt;
+ if (!s && !elf64_sw_64_create_dynamic_sections (dynobj, info))
+ return false;
+
+ /* We need one plt entry per got subsection. Delay allocation of
+ the actual plt entries until size_plt_section, called from
+ size_dynamic_sections or during relaxation. */
+
+ return true;
+ }
+ else
+ h->needs_plt = false;
+
+ /* If this is a weak symbol, and there is a real definition, the
+ processor independent code will have arranged for us to see the
+ real definition first, and we can just use the same value. */
+ if (h->is_weakalias)
+ {
+ struct elf_link_hash_entry *def = weakdef (h);
+ BFD_ASSERT (def->root.type == bfd_link_hash_defined);
+ h->root.u.def.section = def->root.u.def.section;
+ h->root.u.def.value = def->root.u.def.value;
+ return true;
+ }
+
+ /* This is a reference to a symbol defined by a dynamic object which
+ is not a function. The Sw_64, since it uses .got entries for all
+ symbols even in regular objects, does not need the hackery of a
+ .dynbss section and COPY dynamic relocations. */
+
+ return true;
+}
+
+/* Record STO_SW_64_NOPV and STO_SW_64_STD_GPLOAD. */
+
+static void
+elf64_sw_64_merge_symbol_attribute (struct elf_link_hash_entry *h,
+ unsigned int st_other, bool definition,
+ bool dynamic)
+{
+ if (!dynamic && definition)
+ h->other = ((h->other & ELF_ST_VISIBILITY (-1))
+ | (st_other & ~ELF_ST_VISIBILITY (-1)));
+}
+
+/* Symbol versioning can create new symbols, and make our old symbols
+ indirect to the new ones. Consolidate the got and reloc information
+ in these situations. */
+
+static void
+elf64_sw_64_copy_indirect_symbol (struct bfd_link_info *info,
+ struct elf_link_hash_entry *dir,
+ struct elf_link_hash_entry *ind)
+{
+ struct sw_64_elf_link_hash_entry *hi
+ = (struct sw_64_elf_link_hash_entry *) ind;
+ struct sw_64_elf_link_hash_entry *hs
+ = (struct sw_64_elf_link_hash_entry *) dir;
+
+ /* Do the merging in the superclass. */
+ _bfd_elf_link_hash_copy_indirect (info, dir, ind);
+
+ /* Merge the flags. Whee. */
+ hs->flags |= hi->flags;
+
+ /* ??? It's unclear to me what's really supposed to happen when
+ "merging" defweak and defined symbols, given that we don't
+ actually throw away the defweak. This more-or-less copies
+ the logic related to got and plt entries in the superclass. */
+ if (ind->root.type != bfd_link_hash_indirect)
+ return;
+
+ /* Merge the .got entries. Cannibalize the old symbol's list in
+ doing so, since we don't need it anymore. */
+
+ if (hs->got_entries == NULL)
+ hs->got_entries = hi->got_entries;
+ else
+ {
+ struct sw_64_elf_got_entry *gi, *gs, *gin, *gsh;
+
+ gsh = hs->got_entries;
+ for (gi = hi->got_entries; gi; gi = gin)
+ {
+ gin = gi->next;
+ for (gs = gsh; gs; gs = gs->next)
+ if (gi->gotobj == gs->gotobj && gi->reloc_type == gs->reloc_type
+ && gi->addend == gs->addend)
+ {
+ gs->use_count += gi->use_count;
+ goto got_found;
+ }
+ gi->next = hs->got_entries;
+ hs->got_entries = gi;
+ got_found:;
+ }
+ }
+ hi->got_entries = NULL;
+
+ /* And similar for the reloc entries. */
+
+ if (hs->reloc_entries == NULL)
+ hs->reloc_entries = hi->reloc_entries;
+ else
+ {
+ struct sw_64_elf_reloc_entry *ri, *rs, *rin, *rsh;
+
+ rsh = hs->reloc_entries;
+ for (ri = hi->reloc_entries; ri; ri = rin)
+ {
+ rin = ri->next;
+ for (rs = rsh; rs; rs = rs->next)
+ if (ri->rtype == rs->rtype && ri->srel == rs->srel)
+ {
+ rs->count += ri->count;
+ goto found_reloc;
+ }
+ ri->next = hs->reloc_entries;
+ hs->reloc_entries = ri;
+ found_reloc:;
+ }
+ }
+ hi->reloc_entries = NULL;
+}
+
+/* Is it possible to merge two object file's .got tables? */
+
+static bool
+elf64_sw_64_can_merge_gots (bfd *a, bfd *b)
+{
+ int total = sw_64_elf_tdata (a)->total_got_size;
+ bfd *bsub;
+
+ /* Trivial quick fallout test. */
+ if (total + sw_64_elf_tdata (b)->total_got_size <= MAX_GOT_SIZE)
+ return true;
+
+ /* By their nature, local .got entries cannot be merged. */
+ if ((total += sw_64_elf_tdata (b)->local_got_size) > MAX_GOT_SIZE)
+ return false;
+
+ /* Failing the common trivial comparison, we must effectively
+ perform the merge. Not actually performing the merge means that
+ we don't have to store undo information in case we fail. */
+ for (bsub = b; bsub; bsub = sw_64_elf_tdata (bsub)->in_got_link_next)
+ {
+ struct sw_64_elf_link_hash_entry **hashes = sw_64_elf_sym_hashes (bsub);
+ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (bsub)->symtab_hdr;
+ int i, n;
+
+ n = NUM_SHDR_ENTRIES (symtab_hdr) - symtab_hdr->sh_info;
+ for (i = 0; i < n; ++i)
+ {
+ struct sw_64_elf_got_entry *ae, *be;
+ struct sw_64_elf_link_hash_entry *h;
+
+ h = hashes[i];
+ while (h->root.root.type == bfd_link_hash_indirect
+ || h->root.root.type == bfd_link_hash_warning)
+ h = (struct sw_64_elf_link_hash_entry *) h->root.root.u.i.link;
+
+ for (be = h->got_entries; be; be = be->next)
+ {
+ if (be->use_count == 0)
+ continue;
+ if (be->gotobj != b)
+ continue;
+
+ for (ae = h->got_entries; ae; ae = ae->next)
+ if (ae->gotobj == a && ae->reloc_type == be->reloc_type
+ && ae->addend == be->addend)
+ goto global_found;
+
+ total += sw_64_got_entry_size (be->reloc_type);
+ if (total > MAX_GOT_SIZE)
+ return false;
+ global_found:;
+ }
+ }
+ }
+
+ return true;
+}
+
+/* Actually merge two .got tables. */
+
+static void
+elf64_sw_64_merge_gots (bfd *a, bfd *b)
+{
+ int total = sw_64_elf_tdata (a)->total_got_size;
+ bfd *bsub;
+
+ /* Remember local expansion. */
+ {
+ int e = sw_64_elf_tdata (b)->local_got_size;
+ total += e;
+ sw_64_elf_tdata (a)->local_got_size += e;
+ }
+
+ for (bsub = b; bsub; bsub = sw_64_elf_tdata (bsub)->in_got_link_next)
+ {
+ struct sw_64_elf_got_entry **local_got_entries;
+ struct sw_64_elf_link_hash_entry **hashes;
+ Elf_Internal_Shdr *symtab_hdr;
+ int i, n;
+
+ /* Let the local .got entries know they are part of a new subsegment. */
+ local_got_entries = sw_64_elf_tdata (bsub)->local_got_entries;
+ if (local_got_entries)
+ {
+ n = elf_tdata (bsub)->symtab_hdr.sh_info;
+ for (i = 0; i < n; ++i)
+ {
+ struct sw_64_elf_got_entry *ent;
+ for (ent = local_got_entries[i]; ent; ent = ent->next)
+ ent->gotobj = a;
+ }
+ }
+
+ /* Merge the global .got entries. */
+ hashes = sw_64_elf_sym_hashes (bsub);
+ symtab_hdr = &elf_tdata (bsub)->symtab_hdr;
+
+ n = NUM_SHDR_ENTRIES (symtab_hdr) - symtab_hdr->sh_info;
+ for (i = 0; i < n; ++i)
+ {
+ struct sw_64_elf_got_entry *ae, *be, **pbe, **start;
+ struct sw_64_elf_link_hash_entry *h;
+
+ h = hashes[i];
+ while (h->root.root.type == bfd_link_hash_indirect
+ || h->root.root.type == bfd_link_hash_warning)
+ h = (struct sw_64_elf_link_hash_entry *) h->root.root.u.i.link;
+
+ pbe = start = &h->got_entries;
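+ /* Walk this symbol's list: unlink entries whose use count dropped
+ to zero, fold entries that duplicate one already owned by A into
+ that entry, and re-parent the remaining B entries to A. Unlinked
+ entries are poisoned with 0xa5 to catch stale references. */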
+ while ((be = *pbe) != NULL)
+ {
+ if (be->use_count == 0)
+ {
+ *pbe = be->next;
+ memset (be, 0xa5, sizeof (*be));
+ goto kill;
+ }
+ if (be->gotobj != b)
+ goto next;
+
+ for (ae = *start; ae; ae = ae->next)
+ if (ae->gotobj == a && ae->reloc_type == be->reloc_type
+ && ae->addend == be->addend)
+ {
+ ae->flags |= be->flags;
+ ae->use_count += be->use_count;
+ *pbe = be->next;
+ memset (be, 0xa5, sizeof (*be));
+ goto kill;
+ }
+ be->gotobj = a;
+ total += sw_64_got_entry_size (be->reloc_type);
+
+ next:;
+ pbe = &be->next;
+ kill:;
+ }
+ }
+
+ sw_64_elf_tdata (bsub)->gotobj = a;
+ }
+ sw_64_elf_tdata (a)->total_got_size = total;
+
+ /* Merge the two in_got chains. */
+ {
+ bfd *next;
+
+ bsub = a;
+ while ((next = sw_64_elf_tdata (bsub)->in_got_link_next) != NULL)
+ bsub = next;
+
+ sw_64_elf_tdata (bsub)->in_got_link_next = b;
+ }
+}
+
+/* Calculate the offsets for the got entries. */
+
+static bool
+elf64_sw_64_calc_got_offsets_for_symbol (struct sw_64_elf_link_hash_entry *h,
+ void *arg ATTRIBUTE_UNUSED)
+{
+ struct sw_64_elf_got_entry *gotent;
+
+ for (gotent = h->got_entries; gotent; gotent = gotent->next)
+ if (gotent->use_count > 0)
+ {
+ struct sw_64_elf_obj_tdata *td;
+ bfd_size_type *plge;
+
+ td = sw_64_elf_tdata (gotent->gotobj);
+ plge = &td->got->size;
+ gotent->got_offset = *plge;
+ *plge += sw_64_got_entry_size (gotent->reloc_type);
+ }
+
+ return true;
+}
+
+static void
+elf64_sw_64_calc_got_offsets (struct bfd_link_info *info)
+{
+ bfd *i, *got_list;
+ struct sw_64_elf_link_hash_table *htab;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return;
+ got_list = htab->got_list;
+
+ /* First, zero out the .got sizes, as we may be recalculating the
+ .got after optimizing it. */
+ for (i = got_list; i; i = sw_64_elf_tdata (i)->got_link_next)
+ sw_64_elf_tdata (i)->got->size = 0;
+
+ /* Next, fill in the offsets for all the global entries. */
+ sw_64_elf_link_hash_traverse (htab, elf64_sw_64_calc_got_offsets_for_symbol,
+ NULL);
+
+ /* Finally, fill in the offsets for the local entries. */
+ for (i = got_list; i; i = sw_64_elf_tdata (i)->got_link_next)
+ {
+ bfd_size_type got_offset = sw_64_elf_tdata (i)->got->size;
+ bfd *j;
+
+ for (j = i; j; j = sw_64_elf_tdata (j)->in_got_link_next)
+ {
+ struct sw_64_elf_got_entry **local_got_entries, *gotent;
+ int k, n;
+
+ local_got_entries = sw_64_elf_tdata (j)->local_got_entries;
+ if (!local_got_entries)
+ continue;
+
+ for (k = 0, n = elf_tdata (j)->symtab_hdr.sh_info; k < n; ++k)
+ for (gotent = local_got_entries[k]; gotent; gotent = gotent->next)
+ if (gotent->use_count > 0)
+ {
+ gotent->got_offset = got_offset;
+ got_offset += sw_64_got_entry_size (gotent->reloc_type);
+ }
+ }
+
+ sw_64_elf_tdata (i)->got->size = got_offset;
+ }
+}
+
+/* Constructs the gots. */
+
+static bool
+elf64_sw_64_size_got_sections (struct bfd_link_info *info, bool may_merge)
+{
+ bfd *i, *got_list, *cur_got_obj = NULL;
+ struct sw_64_elf_link_hash_table *htab;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return false;
+ got_list = htab->got_list;
+
+ /* On the first time through, pretend we have an existing got list
+ consisting of all of the input files. */
+ if (got_list == NULL)
+ {
+ for (i = info->input_bfds; i; i = i->link.next)
+ {
+ bfd *this_got;
+
+ if (!is_sw_64_elf (i))
+ continue;
+
+ this_got = sw_64_elf_tdata (i)->gotobj;
+ if (this_got == NULL)
+ continue;
+
+ /* We are assuming no merging has yet occurred. */
+ BFD_ASSERT (this_got == i);
+
+ if (sw_64_elf_tdata (this_got)->total_got_size > MAX_GOT_SIZE_NEW)
+ {
+ /* Yikes! A single object file has too many entries. */
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: .got subsegment exceeds 2G (size %d)"), i,
+ sw_64_elf_tdata (this_got)->total_got_size);
+ return false;
+ }
+
+ if (got_list == NULL)
+ got_list = this_got;
+ else
+ sw_64_elf_tdata (cur_got_obj)->got_link_next = this_got;
+ cur_got_obj = this_got;
+ }
+
+ /* Strange degenerate case of no got references. */
+ if (got_list == NULL)
+ return true;
+
+ htab->got_list = got_list;
+ }
+
+ cur_got_obj = got_list;
+ if (cur_got_obj == NULL)
+ return false;
+
+ if (may_merge)
+ {
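+ /* Greedily fold each following .got subsegment into the current
+ one while the combined size stays within bounds; when it does not
+ fit, move on and merge into that subsegment instead. */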
+ i = sw_64_elf_tdata (cur_got_obj)->got_link_next;
+ while (i != NULL)
+ {
+ if (elf64_sw_64_can_merge_gots (cur_got_obj, i))
+ {
+ elf64_sw_64_merge_gots (cur_got_obj, i);
+
+ sw_64_elf_tdata (i)->got->size = 0;
+ i = sw_64_elf_tdata (i)->got_link_next;
+ sw_64_elf_tdata (cur_got_obj)->got_link_next = i;
+ }
+ else
+ {
+ cur_got_obj = i;
+ i = sw_64_elf_tdata (i)->got_link_next;
+ }
+ }
+ }
+
+ /* Once the gots have been merged, fill in the got offsets for
+ everything therein. */
+ elf64_sw_64_calc_got_offsets (info);
+
+ return true;
+}
+
+static bool
+elf64_sw_64_size_plt_section_1 (struct sw_64_elf_link_hash_entry *h, void *data)
+{
+ asection *splt = (asection *) data;
+ struct sw_64_elf_got_entry *gotent;
+ bool saw_one = false;
+
+ /* If we didn't need an entry before, we still don't. */
+ if (!h->root.needs_plt)
+ return true;
+
+ /* For each LITERAL got entry still in use, allocate a plt entry. */
+ for (gotent = h->got_entries; gotent; gotent = gotent->next)
+ if (gotent->reloc_type == R_SW_64_LITERAL && gotent->use_count > 0)
+ {
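+ /* The first entry allocated also reserves room for the PLT
+ header. */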
+ if (splt->size == 0)
+ splt->size = PLT_HEADER_SIZE;
+ gotent->plt_offset = splt->size;
+ splt->size += PLT_ENTRY_SIZE;
+ saw_one = true;
+ }
+
+ /* If there weren't any, there's no longer a need for the PLT entry. */
+ if (!saw_one)
+ h->root.needs_plt = false;
+
+ return true;
+}
+
+/* Called from relax_section to rebuild the PLT in light of potential changes
+ in the function's status. */
+
+static void
+elf64_sw_64_size_plt_section (struct bfd_link_info *info)
+{
+ asection *splt, *spltrel, *sgotplt;
+ unsigned long entries;
+ struct sw_64_elf_link_hash_table *htab;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return;
+
+ splt = elf_hash_table (info)->splt;
+ if (splt == NULL)
+ return;
+
+ splt->size = 0;
+
+ sw_64_elf_link_hash_traverse (htab, elf64_sw_64_size_plt_section_1, splt);
+
+ /* Every plt entry requires a JMP_SLOT relocation. */
+ spltrel = elf_hash_table (info)->srelplt;
+ entries = 0;
+ if (splt->size)
+ {
+ if (elf64_sw_64_use_secureplt)
+ entries = (splt->size - NEW_PLT_HEADER_SIZE) / NEW_PLT_ENTRY_SIZE;
+ else
+ entries = (splt->size - OLD_PLT_HEADER_SIZE) / OLD_PLT_ENTRY_SIZE;
+ }
+ spltrel->size = entries * sizeof (Elf64_External_Rela);
+
+ /* When using the secureplt, we need two words somewhere in the data
+ segment for the dynamic linker to tell us where to go. This is the
+ entire contents of the .got.plt section. */
+ if (elf64_sw_64_use_secureplt)
+ {
+ sgotplt = elf_hash_table (info)->sgotplt;
+ sgotplt->size = entries ? 16 : 0;
+ }
+}
+
+static bool
+elf64_sw_64_always_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info)
+{
+ bfd *i;
+ struct sw_64_elf_link_hash_table *htab;
+
+ if (bfd_link_relocatable (info))
+ return true;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return false;
+
+ if (!elf64_sw_64_size_got_sections (info, true))
+ return false;
+
+ /* Allocate space for all of the .got subsections. */
+ i = htab->got_list;
+ for (; i; i = sw_64_elf_tdata (i)->got_link_next)
+ {
+ asection *s = sw_64_elf_tdata (i)->got;
+ if (s->size > 0)
+ {
+ s->contents = (bfd_byte *) bfd_zalloc (i, s->size);
+ if (s->contents == NULL)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* The number of dynamic relocations required by a static relocation. */
+
+static int
+sw_64_dynamic_entries_for_reloc (int r_type, int dynamic, int shared, int pie)
+{
+ switch (r_type)
+ {
+ /* May appear in GOT entries. */
+ case R_SW_64_TLSGD:
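+ /* A GD entry is a two-word tls_index: both words need dynamic
+ relocs (DTPMOD64 and DTPREL64) when the symbol is dynamic, only
+ the module word does for a local symbol in a shared library, and
+ none are needed for a static link. */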
+ return (dynamic ? 2 : shared ? 1 : 0);
+ case R_SW_64_TLSLDM:
+ return shared;
+ case R_SW_64_LITERAL:
+ return dynamic || shared;
+ case R_SW_64_GOTTPREL:
+ return dynamic || (shared && !pie);
+ case R_SW_64_GOTDTPREL:
+ return dynamic;
+
+ /* May appear in data sections. */
+ case R_SW_64_REFLONG:
+ case R_SW_64_REFQUAD:
+ return dynamic || shared;
+ case R_SW_64_TPREL64:
+ return dynamic || (shared && !pie);
+
+ /* Everything else is illegal. We'll issue an error during
+ relocate_section. */
+ default:
+ return 0;
+ }
+}
+
+/* Work out the sizes of the dynamic relocation entries. */
+
+static bool
+elf64_sw_64_calc_dynrel_sizes (struct sw_64_elf_link_hash_entry *h,
+ struct bfd_link_info *info)
+{
+ bool dynamic;
+ struct sw_64_elf_reloc_entry *relent;
+ unsigned long entries;
+
+ /* If the symbol was defined as a common symbol in a regular object
+ file, and there was no definition in any dynamic object, then the
+ linker will have allocated space for the symbol in a common
+ section but the ELF_LINK_HASH_DEF_REGULAR flag will not have been
+ set. This is done for dynamic symbols in
+ elf_adjust_dynamic_symbol but this is not done for non-dynamic
+ symbols, somehow. */
+ if (!h->root.def_regular && h->root.ref_regular && !h->root.def_dynamic
+ && (h->root.root.type == bfd_link_hash_defined
+ || h->root.root.type == bfd_link_hash_defweak)
+ && !(h->root.root.u.def.section->owner->flags & DYNAMIC))
+ h->root.def_regular = 1;
+
+ /* If the symbol is dynamic, we'll need all the relocations in their
+ natural form. If this is a shared object, and it has been forced
+ local, we'll need the same number of RELATIVE relocations. */
+ dynamic = sw_64_elf_dynamic_symbol_p (&h->root, info);
+
+ /* If the symbol is a hidden undefined weak, then we never have any
+ relocations. Avoid the loop which may want to add RELATIVE relocs
+ based on bfd_link_pic (info). */
+ if (h->root.root.type == bfd_link_hash_undefweak && !dynamic)
+ return true;
+
+ for (relent = h->reloc_entries; relent; relent = relent->next)
+ {
+ entries = sw_64_dynamic_entries_for_reloc (relent->rtype, dynamic,
+ bfd_link_pic (info),
+ bfd_link_pie (info));
+ if (entries)
+ {
+ asection *sec = relent->sec;
+ relent->srel->size
+ += entries * sizeof (Elf64_External_Rela) * relent->count;
+ if ((sec->flags & SEC_READONLY) != 0)
+ {
+ info->flags |= DF_TEXTREL;
+ info->callbacks->minfo (
+ _ ("%pB: dynamic relocation against `T' in "
+ "read-only section `%pA'\n"),
+ sec->owner, sec);
+ }
+ }
+ }
+
+ return true;
+}
+
+/* Subroutine of elf64_sw_64_size_rela_got_section for doing the
+ global symbols. */
+
+static bool
+elf64_sw_64_size_rela_got_1 (struct sw_64_elf_link_hash_entry *h,
+ struct bfd_link_info *info)
+{
+ bool dynamic;
+ struct sw_64_elf_got_entry *gotent;
+ unsigned long entries;
+
+ /* If we're using a plt for this symbol, then all of its relocations
+ for its got entries go into .rela.plt. */
+ if (h->root.needs_plt)
+ return true;
+
+ /* If the symbol is dynamic, we'll need all the relocations in their
+ natural form. If this is a shared object, and it has been forced
+ local, we'll need the same number of RELATIVE relocations. */
+ dynamic = sw_64_elf_dynamic_symbol_p (&h->root, info);
+
+ /* If the symbol is a hidden undefined weak, then we never have any
+ relocations. Avoid the loop which may want to add RELATIVE relocs
+ based on bfd_link_pic (info). */
+ if (h->root.root.type == bfd_link_hash_undefweak && !dynamic)
+ return true;
+
+ entries = 0;
+ for (gotent = h->got_entries; gotent; gotent = gotent->next)
+ if (gotent->use_count > 0)
+ entries += sw_64_dynamic_entries_for_reloc (gotent->reloc_type, dynamic,
+ bfd_link_pic (info),
+ bfd_link_pie (info));
+
+ if (entries > 0)
+ {
+ asection *srel = elf_hash_table (info)->srelgot;
+ BFD_ASSERT (srel != NULL);
+ srel->size += sizeof (Elf64_External_Rela) * entries;
+ }
+
+ return true;
+}
+
+/* Set the sizes of the dynamic relocation sections. */
+
+static void
+elf64_sw_64_size_rela_got_section (struct bfd_link_info *info)
+{
+ unsigned long entries;
+ bfd *i;
+ asection *srel;
+ struct sw_64_elf_link_hash_table *htab;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return;
+
+ /* Shared libraries often require RELATIVE relocs, and some relocs
+ require attention for the main application as well. */
+
+ entries = 0;
+ for (i = htab->got_list; i; i = sw_64_elf_tdata (i)->got_link_next)
+ {
+ bfd *j;
+
+ for (j = i; j; j = sw_64_elf_tdata (j)->in_got_link_next)
+ {
+ struct sw_64_elf_got_entry **local_got_entries, *gotent;
+ int k, n;
+
+ local_got_entries = sw_64_elf_tdata (j)->local_got_entries;
+ if (!local_got_entries)
+ continue;
+
+ for (k = 0, n = elf_tdata (j)->symtab_hdr.sh_info; k < n; ++k)
+ for (gotent = local_got_entries[k]; gotent; gotent = gotent->next)
+ if (gotent->use_count > 0)
+ entries
+ += (sw_64_dynamic_entries_for_reloc (gotent->reloc_type, 0,
+ bfd_link_pic (info),
+ bfd_link_pie (info)));
+ }
+ }
+
+ srel = elf_hash_table (info)->srelgot;
+ if (!srel)
+ {
+ BFD_ASSERT (entries == 0);
+ return;
+ }
+ srel->size = sizeof (Elf64_External_Rela) * entries;
+
+ /* Now do the non-local symbols. */
+ sw_64_elf_link_hash_traverse (htab, elf64_sw_64_size_rela_got_1, info);
+}
+
+/* Set the sizes of the dynamic sections. */
+
+static bool
+elf64_sw_64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info)
+{
+ bfd *dynobj;
+ asection *s;
+ bool relplt, relocs;
+ struct sw_64_elf_link_hash_table *htab;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return false;
+
+ dynobj = elf_hash_table (info)->dynobj;
+ BFD_ASSERT (dynobj != NULL);
+
+ if (elf_hash_table (info)->dynamic_sections_created)
+ {
+ /* Set the contents of the .interp section to the interpreter. */
+ if (bfd_link_executable (info) && !info->nointerp)
+ {
+ s = bfd_get_linker_section (dynobj, ".interp");
+ BFD_ASSERT (s != NULL);
+ s->size = sizeof ELF_DYNAMIC_INTERPRETER;
+ s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
+ }
+
+ /* Now that we've seen all of the input files, we can decide which
+ symbols need dynamic relocation entries and which don't. We've
+ collected information in check_relocs that we can now apply to
+ size the dynamic relocation sections. */
+ sw_64_elf_link_hash_traverse (htab, elf64_sw_64_calc_dynrel_sizes, info);
+
+ elf64_sw_64_size_rela_got_section (info);
+ elf64_sw_64_size_plt_section (info);
+ }
+ /* else we're not dynamic and by definition we don't need such things. */
+
+ /* The check_relocs and adjust_dynamic_symbol entry points have
+ determined the sizes of the various dynamic sections. Allocate
+ memory for them. */
+ relplt = false;
+ relocs = false;
+ for (s = dynobj->sections; s != NULL; s = s->next)
+ {
+ const char *name;
+
+ if (!(s->flags & SEC_LINKER_CREATED))
+ continue;
+
+ /* It's OK to base decisions on the section name, because none
+ of the dynobj section names depend upon the input files. */
+ name = bfd_section_name (s);
+
+ if (startswith (name, ".rela"))
+ {
+ if (s->size != 0)
+ {
+ if (strcmp (name, ".rela.plt") == 0)
+ relplt = true;
+ else
+ relocs = true;
+
+ /* We use the reloc_count field as a counter if we need
+ to copy relocs into the output file. */
+ s->reloc_count = 0;
+ }
+ }
+ else if (!startswith (name, ".got") && strcmp (name, ".plt") != 0
+ && strcmp (name, ".dynbss") != 0)
+ {
+ /* It's not one of our dynamic sections, so don't allocate space. */
+ continue;
+ }
+
+ if (s->size == 0)
+ {
+ /* If we don't need this section, strip it from the output file.
+ This is to handle .rela.bss and .rela.plt. We must create it
+ in create_dynamic_sections, because it must be created before
+ the linker maps input sections to output sections. The
+ linker does that before adjust_dynamic_symbol is called, and
+ it is that function which decides whether anything needs to
+ go into these sections. */
+ if (!startswith (name, ".got"))
+ s->flags |= SEC_EXCLUDE;
+ }
+ else if ((s->flags & SEC_HAS_CONTENTS) != 0)
+ {
+ /* Allocate memory for the section contents. */
+ s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
+ if (s->contents == NULL)
+ return false;
+ }
+ }
+
+ if (elf_hash_table (info)->dynamic_sections_created)
+ {
+ /* Add some entries to the .dynamic section. We fill in the
+ values later, in elf64_sw_64_finish_dynamic_sections, but we
+ must add the entries now so that we get the correct size for
+ the .dynamic section. The DT_DEBUG entry is filled in by the
+ dynamic linker and used by the debugger. */
+#define add_dynamic_entry(TAG, VAL) _bfd_elf_add_dynamic_entry (info, TAG, VAL)
+
+ if (bfd_link_executable (info))
+ {
+ if (!add_dynamic_entry (DT_DEBUG, 0))
+ return false;
+ }
+
+ if (relplt)
+ {
+ if (!add_dynamic_entry (DT_PLTGOT, 0)
+ || !add_dynamic_entry (DT_PLTRELSZ, 0)
+ || !add_dynamic_entry (DT_PLTREL, DT_RELA)
+ || !add_dynamic_entry (DT_JMPREL, 0))
+ return false;
+
+ if (elf64_sw_64_use_secureplt
+ && !add_dynamic_entry (DT_SW_64_PLTRO, 1))
+ return false;
+ }
+
+ if (relocs)
+ {
+ if (!add_dynamic_entry (DT_RELA, 0)
+ || !add_dynamic_entry (DT_RELASZ, 0)
+ || !add_dynamic_entry (DT_RELAENT, sizeof (Elf64_External_Rela)))
+ return false;
+
+ if (info->flags & DF_TEXTREL)
+ {
+ if (!add_dynamic_entry (DT_TEXTREL, 0))
+ return false;
+ }
+ }
+ }
+#undef add_dynamic_entry
+
+ return true;
+}
+
+/* These functions do relaxation for Sw_64 ELF.
+
+ Currently I'm only handling what I can do with existing compiler
+ and assembler support, which means no instructions are removed,
+ though some may be nopped. At this time GCC does not emit enough
+ information to do all of the relaxing that is possible. It will
+   take a non-trivial amount of work for that to happen.
+
+   There are a couple of interesting papers on this subject, related to
+   Sw_64 in particular, that I once read but cannot find references to
+   at the moment.  They are by David Wall, then of DEC WRL.  */
+
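+/* Per-section state threaded through the relaxation routines below.  */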
+struct sw_64_relax_info
+{
+ bfd *abfd;
+ asection *sec;
+ bfd_byte *contents;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *relocs, *relend;
+ struct bfd_link_info *link_info;
+ bfd_vma gp;
+ bfd *gotobj;
+ asection *tsec;
+ struct sw_64_elf_link_hash_entry *h;
+ struct sw_64_elf_got_entry **first_gotent;
+ struct sw_64_elf_got_entry *gotent;
+ bool changed_contents;
+ bool changed_relocs;
+ unsigned char other;
+};
+
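+/* Return the relocation with r_offset OFFSET and type TYPE within
+   [REL, RELEND), or NULL if there is none.  */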
+static Elf_Internal_Rela *
+elf64_sw_64_find_reloc_at_ofs (Elf_Internal_Rela *rel,
+ Elf_Internal_Rela *relend, bfd_vma offset,
+ int type)
+{
+ while (rel < relend)
+ {
+ if (rel->r_offset == offset
+ && ELF64_R_TYPE (rel->r_info) == (unsigned int) type)
+ return rel;
+ ++rel;
+ }
+ return NULL;
+}
+
+static bool
+elf64_sw_64_relax_got_load (struct sw_64_relax_info *info, bfd_vma symval,
+ Elf_Internal_Rela *irel, unsigned long r_type)
+{
+ unsigned int insn;
+ bfd_signed_vma disp;
+
+ /* Get the instruction. */
+ insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
+
+ if (insn >> 26 != OP_LDL && insn >> 26 != OP_LDW)
+ {
+ reloc_howto_type *howto = elf64_sw_64_howto_table + r_type;
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: %pA+%#" PRIx64 ": warning: "
+ "%s relocation against unexpected insn"),
+ info->abfd, info->sec, (uint64_t) irel->r_offset, howto->name);
+ return true;
+ }
+
+ /* Can't relax dynamic symbols. */
+ if (info->h != NULL
+ && sw_64_elf_dynamic_symbol_p (&info->h->root, info->link_info))
+ return true;
+
+ /* Can't use local-exec relocations in shared libraries. */
+ if (r_type == R_SW_64_GOTTPREL && bfd_link_dll (info->link_info))
+ return true;
+
+ if (r_type == R_SW_64_LITERAL)
+ {
+ /* Look for nice constant addresses. This includes the not-uncommon
+ special case of 0 for undefweak symbols. */
+ if ((info->h && info->h->root.root.type == bfd_link_hash_undefweak)
+ || (!bfd_link_pic (info->link_info)
+ && (symval >= (bfd_vma) -0x8000 || symval < 0x8000)))
+ {
+ disp = 0;
+ insn = (OP_LDI << 26) | (insn & (31 << 21)) | (31 << 16);
+ insn |= (symval & 0xffff);
+ r_type = R_SW_64_NONE;
+ }
+ else
+ {
+ /* We may only create GPREL relocs during the second pass. */
+ if ((info->link_info->relax_pass == 0)
+ || !(symval >= (bfd_vma) -0x8000 || symval < 0x8000))
+ return true;
+
+ disp = symval - info->gp;
+ insn = (OP_LDI << 26) | (insn & 0x03ff0000);
+ r_type = R_SW_64_GPREL16;
+ }
+ }
+ else
+ {
+ bfd_vma dtp_base, tp_base;
+
+ BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
+ dtp_base = sw_64_get_dtprel_base (info->link_info);
+ tp_base = sw_64_get_tprel_base (info->link_info);
+ disp = symval - (r_type == R_SW_64_GOTDTPREL ? dtp_base : tp_base);
+
+ insn = (OP_LDI << 26) | (insn & (31 << 21)) | (31 << 16);
+
+ switch (r_type)
+ {
+ case R_SW_64_GOTDTPREL:
+ r_type = R_SW_64_DTPREL16;
+ break;
+ case R_SW_64_GOTTPREL:
+ r_type = R_SW_64_TPREL16;
+ break;
+ default:
+ BFD_ASSERT (0);
+ return false;
+ }
+ }
+
+ if (disp < -0x8000 || disp >= 0x8000)
+ return true;
+
+ bfd_put_32 (info->abfd, (bfd_vma) insn, info->contents + irel->r_offset);
+ info->changed_contents = true;
+
+ /* Reduce the use count on this got entry by one, possibly
+ eliminating it. */
+ if (--info->gotent->use_count == 0)
+ {
+ int sz = sw_64_got_entry_size (r_type);
+ sw_64_elf_tdata (info->gotobj)->total_got_size -= sz;
+ if (!info->h)
+ sw_64_elf_tdata (info->gotobj)->local_got_size -= sz;
+ }
+
+ /* Smash the existing GOT relocation for its 16-bit immediate pair. */
+ irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), r_type);
+ info->changed_relocs = true;
+
+ /* ??? Search forward through this basic block looking for insns
+ that use the target register. Stop after an insn modifying the
+ register is seen, or after a branch or call.
+
+ Any such memory load insn may be substituted by a load directly
+ off the GP. This allows the memory load insn to be issued before
+ the calculated GP register would otherwise be ready.
+
+ Any such jsr insn can be replaced by a bsr if it is in range.
+
+ This would mean that we'd have to _add_ relocations, the pain of
+ which gives one pause. */
+
+ return true;
+}
+
+static bfd_vma
+elf64_sw_64_relax_opt_call (struct sw_64_relax_info *info, bfd_vma symval)
+{
+ /* If the function has the same gp, and we can identify that the
+ function does not use its function pointer, we can eliminate the
+ address load. */
+
+ /* If the symbol is marked NOPV, we are being told the function never
+ needs its procedure value. */
+ if ((info->other & STO_SW_64_STD_GPLOAD) == STO_SW_64_NOPV)
+ return symval;
+
+ /* If the symbol is marked STD_GP, we are being told the function does
+ a normal ldgp in the first two words. */
+ else if ((info->other & STO_SW_64_STD_GPLOAD) == STO_SW_64_STD_GPLOAD)
+ ;
+
+ /* Otherwise, we may be able to identify a GP load in the first two
+ words, which we can then skip. */
+ else
+ {
+ Elf_Internal_Rela *tsec_relocs, *tsec_relend, *tsec_free, *gpdisp;
+ bfd_vma ofs;
+
+ /* Load the relocations from the section that the target symbol is in. */
+ if (info->sec == info->tsec)
+ {
+ tsec_relocs = info->relocs;
+ tsec_relend = info->relend;
+ tsec_free = NULL;
+ }
+ else
+ {
+ tsec_relocs
+ = (_bfd_elf_link_read_relocs (info->abfd, info->tsec, NULL,
+ (Elf_Internal_Rela *) NULL,
+ info->link_info->keep_memory));
+ if (tsec_relocs == NULL)
+ return 0;
+ tsec_relend = tsec_relocs + info->tsec->reloc_count;
+ tsec_free = (elf_section_data (info->tsec)->relocs == tsec_relocs
+ ? NULL
+ : tsec_relocs);
+ }
+
+ /* Recover the symbol's offset within the section. */
+ ofs = (symval - info->tsec->output_section->vma
+ - info->tsec->output_offset);
+
+ /* Look for a GPDISP reloc. */
+ gpdisp = (elf64_sw_64_find_reloc_at_ofs (tsec_relocs, tsec_relend, ofs,
+ R_SW_64_GPDISP));
+
+ if (!gpdisp || gpdisp->r_addend != 4)
+ {
+ free (tsec_free);
+ return 0;
+ }
+ free (tsec_free);
+ }
+
+ /* We've now determined that we can skip an initial gp load. Verify
+ that the call and the target use the same gp. */
+ if (info->link_info->output_bfd->xvec != info->tsec->owner->xvec
+ || info->gotobj != sw_64_elf_tdata (info->tsec->owner)->gotobj)
+ return 0;
+
+ return symval + 8;
+}
+
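+/* State shared with elf64_sw_64_relax_with_lituse: ADDPI_FLAG records that
+   a jsr was rewritten into a 26-bit branch, ADDPI_ODISP the displacement
+   computed for the last call-type lituse, and LDR_OFFSET the offset of the
+   rewritten call, so that the literal load can later be replaced by an
+   ADDPI.  */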
+int addpi_flag = 0;
+bfd_signed_vma addpi_odisp = 0;
+int ldr_offset = 0;
+static bool
+elf64_sw_64_relax_with_lituse (struct sw_64_relax_info *info, bfd_vma symval,
+ Elf_Internal_Rela *irel)
+{
+ Elf_Internal_Rela *urel, *erel, *irelend = info->relend;
+ int flags;
+ bfd_signed_vma disp;
+ bool fits16;
+ bool fits32;
+ bool lit_reused = false;
+ bool all_optimized = true;
+ bool changed_contents;
+ bool changed_relocs;
+ bfd_byte *contents = info->contents;
+ bfd *abfd = info->abfd;
+ bfd_vma sec_output_vma;
+ unsigned int lit_insn;
+ int relax_pass;
+
+ lit_insn = bfd_get_32 (abfd, contents + irel->r_offset);
+ if (lit_insn >> 26 != OP_LDL && lit_insn >> 26 != OP_LDW)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: %pA+%#" PRIx64 ": warning: "
+ "%s relocation against unexpected insn"),
+ abfd, info->sec, (uint64_t) irel->r_offset, "LITERAL");
+ return true;
+ }
+
+ /* Can't relax dynamic symbols. */
+ if (info->h != NULL
+ && sw_64_elf_dynamic_symbol_p (&info->h->root, info->link_info))
+ return true;
+
+ changed_contents = info->changed_contents;
+ changed_relocs = info->changed_relocs;
+ sec_output_vma = info->sec->output_section->vma + info->sec->output_offset;
+ relax_pass = info->link_info->relax_pass;
+
+ /* Summarize how this particular LITERAL is used. */
+ for (erel = irel + 1, flags = 0; erel < irelend; ++erel)
+ {
+ if (ELF64_R_TYPE (erel->r_info) != R_SW_64_LITUSE)
+ break;
+ if (erel->r_addend <= 6)
+ flags |= 1 << erel->r_addend;
+ }
+
+ /* A little preparation for the loop... */
+ disp = symval - info->gp;
+
+ for (urel = irel + 1; urel < erel; ++urel)
+ {
+ bfd_vma urel_r_offset = urel->r_offset;
+ unsigned int insn;
+ int insn_disp;
+ bfd_signed_vma xdisp;
+ Elf_Internal_Rela nrel;
+
+ insn = bfd_get_32 (abfd, contents + urel_r_offset);
+
+ switch (urel->r_addend)
+ {
+ case LITUSE_SW_64_ADDR:
+ default:
+ /* This type is really just a placeholder to note that all
+ uses cannot be optimized, but to still allow some. */
+ all_optimized = false;
+ break;
+
+ case LITUSE_SW_64_BASE:
+ /* We may only create GPREL relocs during the second pass. */
+ if (relax_pass == 0)
+ {
+ all_optimized = false;
+ break;
+ }
+
+ /* We can always optimize 16-bit displacements. */
+
+ /* Extract the displacement from the instruction, sign-extending
+ it if necessary, then test whether it is within 16 or 32 bits
+ displacement from GP. */
+ insn_disp = ((insn & 0xffff) ^ 0x8000) - 0x8000;
+
+ xdisp = disp + insn_disp;
+ fits16 = (xdisp >= -(bfd_signed_vma) 0x8000 && xdisp < 0x8000);
+ fits32
+ = (xdisp >= -(bfd_signed_vma) 0x80000000 && xdisp < 0x7fff8000);
+
+ if (fits16)
+ {
+ /* Take the op code and dest from this insn, take the base
+ register from the literal insn. Leave the offset alone. */
+ insn = (insn & 0xffe0ffff) | (lit_insn & 0x001f0000);
+ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
+ changed_contents = true;
+
+ nrel = *urel;
+ nrel.r_info
+ = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), R_SW_64_GPREL16);
+ nrel.r_addend = irel->r_addend;
+
+ /* As we adjust, move the reloc to the end so that we don't
+ break the LITERAL+LITUSE chain. */
+ if (urel < --erel)
+ *urel-- = *erel;
+ *erel = nrel;
+ changed_relocs = true;
+ }
+
+ /* If all mem+byte, we can optimize 32-bit mem displacements. */
+ else if (fits32 && !(flags & ~6))
+ {
+ /* FIXME: sanity check that lit insn Ra is mem insn Rb. */
+
+ irel->r_info
+ = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), R_SW_64_GPRELHIGH);
+ lit_insn = (OP_LDIH << 26) | (lit_insn & 0x03ff0000);
+ bfd_put_32 (abfd, (bfd_vma) lit_insn, contents + irel->r_offset);
+ lit_reused = true;
+ changed_contents = true;
+
+ /* Since all relocs must be optimized, don't bother swapping
+ this relocation to the end. */
+ urel->r_info
+ = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), R_SW_64_GPRELLOW);
+ urel->r_addend = irel->r_addend;
+ changed_relocs = true;
+ }
+ else
+ all_optimized = false;
+ break;
+
+ case LITUSE_SW_64_BYTOFF:
+ /* We can always optimize byte instructions. */
+
+ /* FIXME: sanity check the insn for byte op. Check that the
+ literal dest reg is indeed Rb in the byte insn. */
+
+ insn &= ~(unsigned) 0x001ff000;
+ insn |= ((symval & 7) << 13) | 0x1000;
+ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
+ changed_contents = true;
+
+ nrel = *urel;
+ nrel.r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ nrel.r_addend = 0;
+
+ /* As we adjust, move the reloc to the end so that we don't
+ break the LITERAL+LITUSE chain. */
+ if (urel < --erel)
+ *urel-- = *erel;
+ *erel = nrel;
+ changed_relocs = true;
+ break;
+
+ case LITUSE_SW_64_JSR:
+ case LITUSE_SW_64_TLSGD:
+ case LITUSE_SW_64_TLSLDM:
+ case LITUSE_SW_64_JSRDIRECT: {
+ bfd_vma optdest, org;
+ bfd_signed_vma odisp;
+
+ /* For undefined weak symbols, we're mostly interested in getting
+ rid of the got entry whenever possible, so optimize this to a
+ use of the zero register. */
+ if (info->h && info->h->root.root.type == bfd_link_hash_undefweak)
+ {
+ insn |= 31 << 16;
+ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
+
+ changed_contents = true;
+ break;
+ }
+
+ /* If not zero, place to jump without needing pv. */
+ optdest = elf64_sw_64_relax_opt_call (info, symval);
+ org = sec_output_vma + urel_r_offset + 4;
+ odisp = (optdest ? optdest : symval) - org;
+ addpi_odisp = odisp;
+ if (odisp >= -0x400000 && odisp < 0x400000)
+ {
+ Elf_Internal_Rela *xrel;
+
+ /* Preserve branch prediction call stack when possible. */
+ if ((insn & INSN_JSR_MASK) == INSN_JSR)
+ insn = (OP_BSR << 26) | (insn & 0x03e00000);
+ else
+ insn = (OP_BR << 26) | (insn & 0x03e00000);
+ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
+ changed_contents = true;
+
+ nrel = *urel;
+ nrel.r_info
+ = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), R_SW_64_BRADDR);
+ nrel.r_addend = irel->r_addend;
+
+ if (optdest)
+ nrel.r_addend += optdest - symval;
+ else
+ all_optimized = false;
+
+ /* Kill any HINT reloc that might exist for this insn. */
+ xrel
+ = (elf64_sw_64_find_reloc_at_ofs (info->relocs, info->relend,
+ urel_r_offset,
+ R_SW_64_HINT));
+ if (xrel)
+ xrel->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+
+ /* As we adjust, move the reloc to the end so that we don't
+ break the LITERAL+LITUSE chain. */
+ if (urel < --erel)
+ *urel-- = *erel;
+ *erel = nrel;
+
+ info->changed_relocs = true;
+ }
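+	    /* Otherwise, when the lbr option is in effect (flag_sw_lbr == 8),
+	       the target is within range of the wider BR26 branch, the jsr
+	       sits within 8 bytes of its LITERAL (which is paired with a
+	       LITERAL_GOT), the following ldih/ldi pair matches, and the
+	       callee's gp load can be skipped, rewrite the jsr into a BR26
+	       branch and remember its offset so the literal slot can later
+	       become an ADDPI.  */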
+ else if (odisp >= -0x2000000 && odisp < 0x2000000
+ && (insn & INSN_JSR_MASK) == INSN_JSR
+ && info->link_info->flag_sw_lbr == 8
+ && (urel_r_offset - irel->r_offset) < 8 && optdest
+ && ELF64_R_TYPE ((irel - 1)->r_info) == R_SW_64_LITERAL_GOT
+ && bfd_get_32 (abfd, contents + urel_r_offset + 4)
+ == 0xffba0000
+ && bfd_get_32 (abfd, contents + urel_r_offset + 8)
+ == 0xfbbd0000)
+ {
+ Elf_Internal_Rela *xrel;
+ /* Preserve branch prediction call stack when possible. */
+ if ((insn & INSN_JSR_MASK) == INSN_JSR)
+ insn = (0x1d << 26);
+ else
+ {
+ printf ("lbr error\n");
+ insn = (0x1d << 26);
+ }
+ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
+ changed_contents = true;
+ addpi_flag = 1;
+ ldr_offset = urel_r_offset;
+
+ nrel = *urel;
+ nrel.r_info
+ = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), R_SW_64_BR26ADDR);
+ nrel.r_addend = irel->r_addend;
+
+ if (optdest)
+ nrel.r_addend += optdest - symval;
+ else
+ all_optimized = false;
+
+ /* Kill any HINT reloc that might exist for this insn. */
+ xrel
+ = (elf64_sw_64_find_reloc_at_ofs (info->relocs, info->relend,
+ urel_r_offset,
+ R_SW_64_HINT));
+ if (xrel)
+ xrel->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+
+ /* As we adjust, move the reloc to the end so that we don't
+ break the LITERAL+LITUSE chain. */
+ if (urel < --erel)
+ *urel-- = *erel;
+ *erel = nrel;
+
+ info->changed_relocs = true;
+ }
+ else
+ all_optimized = false;
+
+ /* Even if the target is not in range for a direct branch,
+ if we share a GP, we can eliminate the gp reload. */
+ if (optdest)
+ {
+ Elf_Internal_Rela *gpdisp
+ = (elf64_sw_64_find_reloc_at_ofs (info->relocs, irelend,
+ urel_r_offset + 4,
+ R_SW_64_GPDISP));
+ if (gpdisp)
+ {
+ bfd_byte *p_ldih = contents + gpdisp->r_offset;
+ bfd_byte *p_ldi = p_ldih + gpdisp->r_addend;
+ unsigned int ldih = bfd_get_32 (abfd, p_ldih);
+ unsigned int ldi = bfd_get_32 (abfd, p_ldi);
+
+ /* Verify that the instruction is "ldih $29,0 ($26)".
+ Consider a function that ends in a noreturn call,
+ and that the next function begins with an ldgp,
+ and that by accident there is no padding between.
+ In that case the insn would use $27 as the base. */
+
+ if (ldih == 0xffba0000 && ldi == 0xfbba0000)
+ {
+ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP, p_ldih);
+ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP, p_ldi);
+
+ gpdisp->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ changed_contents = true;
+ changed_relocs = true;
+ }
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ /* If we reused the literal instruction, we must have optimized all. */
+ BFD_ASSERT (!lit_reused || all_optimized);
+
+ /* If all cases were optimized, we can reduce the use count on this
+ got entry by one, possibly eliminating it. */
+ if (all_optimized)
+ {
+ if (--info->gotent->use_count == 0)
+ {
+ int sz = sw_64_got_entry_size (R_SW_64_LITERAL);
+ sw_64_elf_tdata (info->gotobj)->total_got_size -= sz;
+ if (!info->h)
+ sw_64_elf_tdata (info->gotobj)->local_got_size -= sz;
+ }
+
+      /* If the literal instruction is no longer needed (it may have been
+         reused), we can eliminate it.  */
+ /* ??? For now, I don't want to deal with compacting the section,
+ so just nop it out. */
+ if (!lit_reused)
+ {
+	  /* Discard a preceding LITERAL_GOT reloc as well; otherwise this
+	     case breaks sw_64/brk.S.  */
+ if (ELF64_R_TYPE ((irel - 1)->r_info) == R_SW_64_LITERAL_GOT)
+ {
+ (irel - 1)->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ changed_relocs = true;
+
+ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP,
+ contents + (irel - 1)->r_offset);
+ }
+ irel->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ changed_relocs = true;
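+	  /* If the call was rewritten into a BR26 branch above and the
+	     displacement does not fit an ordinary branch, turn the literal
+	     slot into an ADDPI encoding the distance to the rewritten call;
+	     otherwise simply nop the literal out.  */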
+	  if ((addpi_odisp >= -0x400000 && addpi_odisp < 0x400000)
+	      || addpi_flag == 0)
+ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP, contents + irel->r_offset);
+ else
+ {
+ addpi_flag = 0;
+ bfd_put_32 (abfd,
+ (bfd_vma) INSN_ADDPI
+ | ((ldr_offset - irel->r_offset) / 4) << 13,
+ contents + irel->r_offset);
+ }
+ changed_contents = true;
+ }
+ }
+
+ info->changed_contents = changed_contents;
+ info->changed_relocs = changed_relocs;
+
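+  /* If some uses could not be optimized, fall back to relaxing the
+     literal load itself during the second pass.  */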
+ if (all_optimized || relax_pass == 0)
+ return true;
+ return elf64_sw_64_relax_got_load (info, symval, irel, R_SW_64_LITERAL);
+}
+
+static bool
+elf64_sw_64_relax_tls_get_addr (struct sw_64_relax_info *info, bfd_vma symval,
+ Elf_Internal_Rela *irel, bool is_gd)
+{
+ bfd_byte *pos[5];
+ unsigned int insn, tlsgd_reg;
+ Elf_Internal_Rela *gpdisp, *hint;
+ bool dynamic, use_gottprel;
+ unsigned long new_symndx;
+
+ dynamic = (info->h != NULL
+ && sw_64_elf_dynamic_symbol_p (&info->h->root, info->link_info));
+
+ /* If a TLS symbol is accessed using IE at least once, there is no point
+ to use dynamic model for it. */
+ if (is_gd && info->h && (info->h->flags & SW_64_ELF_LINK_HASH_TLS_IE))
+ ;
+
+ /* If the symbol is local, and we've already committed to DF_STATIC_TLS,
+ then we might as well relax to IE. */
+ else if (bfd_link_pic (info->link_info) && !dynamic
+ && (info->link_info->flags & DF_STATIC_TLS))
+ ;
+
+ /* Otherwise we must be building an executable to do anything. */
+ else if (bfd_link_pic (info->link_info))
+ return true;
+
+ /* The TLSGD/TLSLDM relocation must be followed by a LITERAL and
+ the matching LITUSE_TLS relocations. */
+ if (irel + 2 >= info->relend)
+ return true;
+ if (ELF64_R_TYPE (irel[1].r_info) != R_SW_64_LITERAL
+ || ELF64_R_TYPE (irel[2].r_info) != R_SW_64_LITUSE
+ || irel[2].r_addend != (is_gd ? LITUSE_SW_64_TLSGD : LITUSE_SW_64_TLSLDM))
+ return true;
+
+ /* There must be a GPDISP relocation positioned immediately after the
+ LITUSE relocation. */
+ gpdisp = elf64_sw_64_find_reloc_at_ofs (info->relocs, info->relend,
+ irel[2].r_offset + 4, R_SW_64_GPDISP);
+ if (!gpdisp)
+ return true;
+
+ pos[0] = info->contents + irel[0].r_offset;
+ pos[1] = info->contents + irel[1].r_offset;
+ pos[2] = info->contents + irel[2].r_offset;
+ pos[3] = info->contents + gpdisp->r_offset;
+ pos[4] = pos[3] + gpdisp->r_addend;
+
+ /* Beware of the compiler hoisting part of the sequence out a loop
+ and adjusting the destination register for the TLSGD insn. If this
+ happens, there will be a move into $16 before the JSR insn, so only
+ transformations of the first insn pair should use this register. */
+ tlsgd_reg = bfd_get_32 (info->abfd, pos[0]);
+ tlsgd_reg = (tlsgd_reg >> 21) & 31;
+
+ /* Generally, the positions are not allowed to be out of order, lest the
+ modified insn sequence have different register lifetimes. We can make
+ an exception when pos 1 is adjacent to pos 0. */
+ if (pos[1] + 4 == pos[0])
+ {
+ bfd_byte *tmp = pos[0];
+ pos[0] = pos[1];
+ pos[1] = tmp;
+ }
+ if (pos[1] >= pos[2] || pos[2] >= pos[3])
+ return true;
+
+ /* Reduce the use count on the LITERAL relocation. Do this before we
+ smash the symndx when we adjust the relocations below. */
+ {
+ struct sw_64_elf_got_entry *lit_gotent;
+ struct sw_64_elf_link_hash_entry *lit_h;
+ unsigned long indx;
+
+ BFD_ASSERT (ELF64_R_SYM (irel[1].r_info) >= info->symtab_hdr->sh_info);
+ indx = ELF64_R_SYM (irel[1].r_info) - info->symtab_hdr->sh_info;
+ lit_h = sw_64_elf_sym_hashes (info->abfd)[indx];
+
+ while (lit_h->root.root.type == bfd_link_hash_indirect
+ || lit_h->root.root.type == bfd_link_hash_warning)
+ lit_h = (struct sw_64_elf_link_hash_entry *) lit_h->root.root.u.i.link;
+
+ for (lit_gotent = lit_h->got_entries; lit_gotent;
+ lit_gotent = lit_gotent->next)
+ if (lit_gotent->gotobj == info->gotobj
+ && lit_gotent->reloc_type == R_SW_64_LITERAL
+ && lit_gotent->addend == irel[1].r_addend)
+ break;
+ BFD_ASSERT (lit_gotent);
+
+ if (--lit_gotent->use_count == 0)
+ {
+ int sz = sw_64_got_entry_size (R_SW_64_LITERAL);
+ sw_64_elf_tdata (info->gotobj)->total_got_size -= sz;
+ }
+ }
+
+ /* Change
+
+ ldi $16,x ($gp) !tlsgd!1
+ ldl $27,__tls_get_addr ($gp) !literal!1
+ jsr $26, ($27),__tls_get_addr !lituse_tlsgd!1
+ ldih $29,0 ($26) !gpdisp!2
+ ldi $29,0 ($29) !gpdisp!2
+ to
+ ldl $16,x ($gp) !gottprel
+ unop
+ call_pal rduniq
+ addl $16,$0,$0
+ unop
+ or the first pair to
+ ldi $16,x ($gp) !tprel
+ unop
+ or
+ ldih $16,x ($gp) !tprelhi
+ ldi $16,x ($16) !tprello
+
+ as appropriate. */
+
+ use_gottprel = false;
+ new_symndx = is_gd ? ELF64_R_SYM (irel->r_info) : STN_UNDEF;
+
+ /* Some compilers warn about a Boolean-looking expression being
+ used in a switch. The explicit cast silences them. */
+ switch ((int) (!dynamic && !bfd_link_pic (info->link_info)))
+ {
+ case 1: {
+ bfd_vma tp_base;
+ bfd_signed_vma disp;
+
+ BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
+ tp_base = sw_64_get_tprel_base (info->link_info);
+ disp = symval - tp_base;
+
+ if (disp >= -0x8000 && disp < 0x8000)
+ {
+ insn = (OP_LDI << 26) | (tlsgd_reg << 21) | (31 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
+
+ irel[0].r_offset = pos[0] - info->contents;
+ irel[0].r_info = ELF64_R_INFO (new_symndx, R_SW_64_TPREL16);
+ irel[1].r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ break;
+ }
+ else if (disp >= -(bfd_signed_vma) 0x80000000
+ && disp < (bfd_signed_vma) 0x7fff8000 && pos[0] + 4 == pos[1])
+ {
+ insn = (OP_LDIH << 26) | (tlsgd_reg << 21) | (31 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
+ insn = (OP_LDI << 26) | (tlsgd_reg << 21) | (tlsgd_reg << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[1]);
+
+ irel[0].r_offset = pos[0] - info->contents;
+ irel[0].r_info = ELF64_R_INFO (new_symndx, R_SW_64_TPRELHI);
+ irel[1].r_offset = pos[1] - info->contents;
+ irel[1].r_info = ELF64_R_INFO (new_symndx, R_SW_64_TPRELLO);
+ break;
+ }
+ }
+ /* FALLTHRU */
+
+ default:
+ use_gottprel = true;
+
+ insn = (OP_LDL << 26) | (tlsgd_reg << 21) | (29 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
+
+ irel[0].r_offset = pos[0] - info->contents;
+ irel[0].r_info = ELF64_R_INFO (new_symndx, R_SW_64_GOTTPREL);
+ irel[1].r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ break;
+ }
+
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_RDUNIQ, pos[2]);
+
+ insn = INSN_ADDL | (16 << 21) | (0 << 16) | (0 << 0);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[3]);
+
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[4]);
+
+ irel[2].r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+ gpdisp->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+
+ hint = elf64_sw_64_find_reloc_at_ofs (info->relocs, info->relend,
+ irel[2].r_offset, R_SW_64_HINT);
+ if (hint)
+ hint->r_info = ELF64_R_INFO (0, R_SW_64_NONE);
+
+ info->changed_contents = true;
+ info->changed_relocs = true;
+
+ /* Reduce the use count on the TLSGD/TLSLDM relocation. */
+ if (--info->gotent->use_count == 0)
+ {
+ int sz = sw_64_got_entry_size (info->gotent->reloc_type);
+ sw_64_elf_tdata (info->gotobj)->total_got_size -= sz;
+ if (!info->h)
+ sw_64_elf_tdata (info->gotobj)->local_got_size -= sz;
+ }
+
+ /* If we've switched to a GOTTPREL relocation, increment the reference
+ count on that got entry. */
+ if (use_gottprel)
+ {
+ struct sw_64_elf_got_entry *tprel_gotent;
+
+ for (tprel_gotent = *info->first_gotent; tprel_gotent;
+ tprel_gotent = tprel_gotent->next)
+ if (tprel_gotent->gotobj == info->gotobj
+ && tprel_gotent->reloc_type == R_SW_64_GOTTPREL
+ && tprel_gotent->addend == irel->r_addend)
+ break;
+ if (tprel_gotent)
+ tprel_gotent->use_count++;
+ else
+ {
+ if (info->gotent->use_count == 0)
+ tprel_gotent = info->gotent;
+ else
+ {
+ tprel_gotent = (struct sw_64_elf_got_entry *)
+ bfd_alloc (info->abfd, sizeof (struct sw_64_elf_got_entry));
+ if (!tprel_gotent)
+ return false;
+
+ tprel_gotent->next = *info->first_gotent;
+ *info->first_gotent = tprel_gotent;
+
+ tprel_gotent->gotobj = info->gotobj;
+ tprel_gotent->addend = irel->r_addend;
+ tprel_gotent->got_offset = -1;
+ tprel_gotent->reloc_done = 0;
+ tprel_gotent->reloc_xlated = 0;
+ }
+
+ tprel_gotent->use_count = 1;
+ tprel_gotent->reloc_type = R_SW_64_GOTTPREL;
+ }
+ }
+
+ return true;
+}
+
+static bool
+elf64_sw_64_relax_section (bfd *abfd, asection *sec,
+ struct bfd_link_info *link_info, bool *again)
+{
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *internal_relocs;
+ Elf_Internal_Rela *irel, *irelend;
+ Elf_Internal_Sym *isymbuf = NULL;
+ struct sw_64_elf_got_entry **local_got_entries;
+ struct sw_64_relax_info info;
+ struct sw_64_elf_link_hash_table *htab;
+ int relax_pass;
+
+ htab = sw_64_elf_hash_table (link_info);
+ if (htab == NULL)
+ return false;
+
+ /* There's nothing to change, yet. */
+ *again = false;
+
+ if (bfd_link_relocatable (link_info)
+ || ((sec->flags & (SEC_CODE | SEC_RELOC | SEC_ALLOC | SEC_HAS_CONTENTS))
+ != (SEC_CODE | SEC_RELOC | SEC_ALLOC | SEC_HAS_CONTENTS))
+ || sec->reloc_count == 0)
+ return true;
+
+ BFD_ASSERT (is_sw_64_elf (abfd));
+ relax_pass = link_info->relax_pass;
+
+ /* Make sure our GOT and PLT tables are up-to-date. */
+ if (htab->relax_trip != link_info->relax_trip)
+ {
+ htab->relax_trip = link_info->relax_trip;
+
+ /* This should never fail after the initial round, since the only error
+ is GOT overflow, and relaxation only shrinks the table. However, we
+ may only merge got sections during the first pass. If we merge
+ sections after we've created GPREL relocs, the GP for the merged
+ section backs up which may put the relocs out of range. */
+ if (!elf64_sw_64_size_got_sections (link_info, relax_pass == 0))
+ abort ();
+ if (elf_hash_table (link_info)->dynamic_sections_created)
+ {
+ elf64_sw_64_size_plt_section (link_info);
+ elf64_sw_64_size_rela_got_section (link_info);
+ }
+ }
+
+ symtab_hdr = &elf_symtab_hdr (abfd);
+ local_got_entries = sw_64_elf_tdata (abfd)->local_got_entries;
+
+ /* Load the relocations for this section. */
+ internal_relocs
+ = (_bfd_elf_link_read_relocs (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
+ link_info->keep_memory));
+ if (internal_relocs == NULL)
+ return false;
+
+ memset (&info, 0, sizeof (info));
+ info.abfd = abfd;
+ info.sec = sec;
+ info.link_info = link_info;
+ info.symtab_hdr = symtab_hdr;
+ info.relocs = internal_relocs;
+ info.relend = irelend = internal_relocs + sec->reloc_count;
+
+ /* Find the GP for this object. Do not store the result back via
+ _bfd_set_gp_value, since this could change again before final. */
+ info.gotobj = sw_64_elf_tdata (abfd)->gotobj;
+ if (info.gotobj)
+ {
+ asection *sgot = sw_64_elf_tdata (info.gotobj)->got;
+ info.gp = (sgot->output_section->vma + sgot->output_offset + 0x8000);
+ }
+
+ /* Get the section contents. */
+ if (elf_section_data (sec)->this_hdr.contents != NULL)
+ info.contents = elf_section_data (sec)->this_hdr.contents;
+ else
+ {
+ if (!bfd_malloc_and_get_section (abfd, sec, &info.contents))
+ goto error_return;
+ }
+
+ for (irel = internal_relocs; irel < irelend; irel++)
+ {
+ bfd_vma symval;
+ struct sw_64_elf_got_entry *gotent;
+ unsigned long r_type = ELF64_R_TYPE (irel->r_info);
+ unsigned long r_symndx = ELF64_R_SYM (irel->r_info);
+
+ /* Early exit for unhandled or unrelaxable relocations. */
+ if (r_type != R_SW_64_LITERAL)
+ {
+ /* We complete everything except LITERAL in the first pass. */
+ if (relax_pass != 0)
+ continue;
+ if (r_type == R_SW_64_TLSLDM)
+ {
+ /* The symbol for a TLSLDM reloc is ignored. Collapse the
+ reloc to the STN_UNDEF (0) symbol so that they all match. */
+ r_symndx = STN_UNDEF;
+ }
+ else if (r_type != R_SW_64_GOTDTPREL && r_type != R_SW_64_GOTTPREL
+ && r_type != R_SW_64_TLSGD)
+ continue;
+ }
+
+ /* Get the value of the symbol referred to by the reloc. */
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ /* A local symbol. */
+ Elf_Internal_Sym *isym;
+
+ /* Read this BFD's local symbols. */
+ if (isymbuf == NULL)
+ {
+ isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (isymbuf == NULL)
+ isymbuf
+ = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info,
+ 0, NULL, NULL, NULL);
+ if (isymbuf == NULL)
+ goto error_return;
+ }
+
+ isym = isymbuf + r_symndx;
+
+ /* Given the symbol for a TLSLDM reloc is ignored, this also
+ means forcing the symbol value to the tp base. */
+ if (r_type == R_SW_64_TLSLDM)
+ {
+ info.tsec = bfd_abs_section_ptr;
+ symval = sw_64_get_tprel_base (info.link_info);
+ }
+ else
+ {
+ symval = isym->st_value;
+ if (isym->st_shndx == SHN_UNDEF)
+ continue;
+ else if (isym->st_shndx == SHN_ABS)
+ info.tsec = bfd_abs_section_ptr;
+ else if (isym->st_shndx == SHN_COMMON)
+ info.tsec = bfd_com_section_ptr;
+ else
+ info.tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
+ }
+
+ info.h = NULL;
+ info.other = isym->st_other;
+ if (local_got_entries)
+ info.first_gotent = &local_got_entries[r_symndx];
+ else
+ {
+ info.first_gotent = &info.gotent;
+ info.gotent = NULL;
+ }
+ }
+ else
+ {
+ unsigned long indx;
+ struct sw_64_elf_link_hash_entry *h;
+
+ indx = r_symndx - symtab_hdr->sh_info;
+ h = sw_64_elf_sym_hashes (abfd)[indx];
+ BFD_ASSERT (h != NULL);
+
+ while (h->root.root.type == bfd_link_hash_indirect
+ || h->root.root.type == bfd_link_hash_warning)
+ h = (struct sw_64_elf_link_hash_entry *) h->root.root.u.i.link;
+
+ /* If the symbol is undefined, we can't do anything with it. */
+ if (h->root.root.type == bfd_link_hash_undefined)
+ continue;
+
+ /* If the symbol isn't defined in the current module,
+ again we can't do anything. */
+ if (h->root.root.type == bfd_link_hash_undefweak)
+ {
+ info.tsec = bfd_abs_section_ptr;
+ symval = 0;
+ }
+ else if (!h->root.def_regular)
+ {
+ /* Except for TLSGD relocs, which can sometimes be
+ relaxed to GOTTPREL relocs. */
+ if (r_type != R_SW_64_TLSGD)
+ continue;
+ info.tsec = bfd_abs_section_ptr;
+ symval = 0;
+ }
+ else
+ {
+ info.tsec = h->root.root.u.def.section;
+ symval = h->root.root.u.def.value;
+ }
+
+ info.h = h;
+ info.other = h->root.other;
+ info.first_gotent = &h->got_entries;
+ }
+
+ /* Search for the got entry to be used by this relocation. */
+ for (gotent = *info.first_gotent; gotent; gotent = gotent->next)
+ if (gotent->gotobj == info.gotobj && gotent->reloc_type == r_type
+ && gotent->addend == irel->r_addend)
+ break;
+ info.gotent = gotent;
+
+ symval += info.tsec->output_section->vma + info.tsec->output_offset;
+ symval += irel->r_addend;
+
+ switch (r_type)
+ {
+ case R_SW_64_LITERAL:
+ BFD_ASSERT (info.gotent != NULL);
+
+ /* If there exist LITUSE relocations immediately following, this
+ opens up all sorts of interesting optimizations, because we
+ now know every location that this address load is used. */
+ if (irel + 1 < irelend
+ && ELF64_R_TYPE (irel[1].r_info) == R_SW_64_LITUSE)
+ {
+ if (!elf64_sw_64_relax_with_lituse (&info, symval, irel))
+ goto error_return;
+ }
+ else
+ {
+ if (!elf64_sw_64_relax_got_load (&info, symval, irel, r_type))
+ goto error_return;
+ }
+ break;
+
+ case R_SW_64_GOTDTPREL:
+ case R_SW_64_GOTTPREL:
+ BFD_ASSERT (info.gotent != NULL);
+ if (!elf64_sw_64_relax_got_load (&info, symval, irel, r_type))
+ goto error_return;
+ break;
+
+ case R_SW_64_TLSGD:
+ case R_SW_64_TLSLDM:
+ BFD_ASSERT (info.gotent != NULL);
+ if (!elf64_sw_64_relax_tls_get_addr (&info, symval, irel,
+ r_type == R_SW_64_TLSGD))
+ goto error_return;
+ break;
+ }
+ }
+
+ if (isymbuf != NULL && symtab_hdr->contents != (unsigned char *) isymbuf)
+ {
+ if (!link_info->keep_memory)
+ free (isymbuf);
+ else
+ {
+ /* Cache the symbols for elf_link_input_bfd. */
+ symtab_hdr->contents = (unsigned char *) isymbuf;
+ }
+ }
+
+ if (info.contents != NULL
+ && elf_section_data (sec)->this_hdr.contents != info.contents)
+ {
+ if (!info.changed_contents && !link_info->keep_memory)
+ free (info.contents);
+ else
+ {
+ /* Cache the section contents for elf_link_input_bfd. */
+ elf_section_data (sec)->this_hdr.contents = info.contents;
+ }
+ }
+
+ if (elf_section_data (sec)->relocs != internal_relocs)
+ {
+ if (!info.changed_relocs)
+ free (internal_relocs);
+ else
+ elf_section_data (sec)->relocs = internal_relocs;
+ }
+
+ *again = info.changed_contents || info.changed_relocs;
+
+ return true;
+
+error_return:
+ if (symtab_hdr->contents != (unsigned char *) isymbuf)
+ free (isymbuf);
+ if (elf_section_data (sec)->this_hdr.contents != info.contents)
+ free (info.contents);
+ if (elf_section_data (sec)->relocs != internal_relocs)
+ free (internal_relocs);
+ return false;
+}
+
+/* Emit a dynamic relocation for (DYNINDX, RTYPE, ADDEND) at (SEC, OFFSET)
+ into the next available slot in SREL. */
+
+static void
+elf64_sw_64_emit_dynrel (bfd *abfd, struct bfd_link_info *info, asection *sec,
+ asection *srel, bfd_vma offset, long dynindx,
+ long rtype, bfd_vma addend)
+{
+ Elf_Internal_Rela outrel;
+ bfd_byte *loc;
+
+ BFD_ASSERT (srel != NULL);
+
+ outrel.r_info = ELF64_R_INFO (dynindx, rtype);
+ outrel.r_addend = addend;
+
+ offset = _bfd_elf_section_offset (abfd, info, sec, offset);
+ if ((offset | 1) != (bfd_vma) -1)
+ outrel.r_offset = sec->output_section->vma + sec->output_offset + offset;
+ else
+ memset (&outrel, 0, sizeof (outrel));
+
+ loc = srel->contents;
+ loc += srel->reloc_count++ * sizeof (Elf64_External_Rela);
+ bfd_elf64_swap_reloca_out (abfd, &outrel, loc);
+ BFD_ASSERT (sizeof (Elf64_External_Rela) * srel->reloc_count <= srel->size);
+}
+
+/* Relocate an Sw_64 ELF section for a relocatable link.
+
+ We don't have to change anything unless the reloc is against a section
+ symbol, in which case we have to adjust according to where the section
+ symbol winds up in the output section. */
+
+static bool
+elf64_sw_64_relocate_section_r (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ bfd *input_bfd, asection *input_section,
+ bfd_byte *contents ATTRIBUTE_UNUSED,
+ Elf_Internal_Rela *relocs,
+ Elf_Internal_Sym *local_syms,
+ asection **local_sections)
+{
+ unsigned long symtab_hdr_sh_info;
+ Elf_Internal_Rela *rel;
+ Elf_Internal_Rela *relend;
+ struct elf_link_hash_entry **sym_hashes;
+ bool ret_val = true;
+
+ symtab_hdr_sh_info = elf_symtab_hdr (input_bfd).sh_info;
+ sym_hashes = elf_sym_hashes (input_bfd);
+
+ relend = relocs + input_section->reloc_count;
+ for (rel = relocs; rel < relend; rel++)
+ {
+ unsigned long r_symndx;
+ Elf_Internal_Sym *sym;
+ asection *sec;
+ unsigned long r_type;
+
+ r_type = ELF64_R_TYPE (rel->r_info);
+ if (r_type >= R_SW_64_max)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: unsupported relocation type %#x"), input_bfd,
+ (int) r_type);
+ bfd_set_error (bfd_error_bad_value);
+ ret_val = false;
+ continue;
+ }
+
+ /* The symbol associated with GPDISP and LITUSE is
+ immaterial. Only the addend is significant. */
+ if (r_type == R_SW_64_GPDISP || r_type == R_SW_64_LITUSE)
+ continue;
+
+ r_symndx = ELF64_R_SYM (rel->r_info);
+ if (r_symndx < symtab_hdr_sh_info)
+ {
+ sym = local_syms + r_symndx;
+ sec = local_sections[r_symndx];
+ }
+ else
+ {
+ struct elf_link_hash_entry *h;
+
+ h = sym_hashes[r_symndx - symtab_hdr_sh_info];
+
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ if (h->root.type != bfd_link_hash_defined
+ && h->root.type != bfd_link_hash_defweak)
+ continue;
+
+ sym = NULL;
+ sec = h->root.u.def.section;
+ }
+
+ if (sec != NULL && discarded_section (sec))
+ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, rel, 1,
+ relend,
+ elf64_sw_64_howto_table + r_type, 0,
+ contents);
+
+ if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
+ rel->r_addend += sec->output_offset;
+ }
+
+ return ret_val;
+}
+
+/* Relocate an Sw_64 ELF section. */
+
+static bool
+elf64_sw_64_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
+ bfd *input_bfd, asection *input_section,
+ bfd_byte *contents, Elf_Internal_Rela *relocs,
+ Elf_Internal_Sym *local_syms,
+ asection **local_sections)
+{
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *rel;
+ Elf_Internal_Rela *relend;
+ asection *sgot, *srel, *srelgot;
+ bfd *dynobj, *gotobj;
+ bfd_vma gp, tp_base, dtp_base;
+ struct sw_64_elf_got_entry **local_got_entries;
+ bool ret_val;
+
+ BFD_ASSERT (is_sw_64_elf (input_bfd));
+
+ /* Handle relocatable links with a smaller loop. */
+ if (bfd_link_relocatable (info))
+ return elf64_sw_64_relocate_section_r (output_bfd, info, input_bfd,
+ input_section, contents, relocs,
+ local_syms, local_sections);
+
+ /* This is a final link. */
+
+ ret_val = true;
+
+ symtab_hdr = &elf_symtab_hdr (input_bfd);
+
+ dynobj = elf_hash_table (info)->dynobj;
+ srelgot = elf_hash_table (info)->srelgot;
+
+ if (input_section->flags & SEC_ALLOC)
+ {
+ const char *section_name;
+ section_name = (bfd_elf_string_from_elf_section (
+ input_bfd, elf_elfheader (input_bfd)->e_shstrndx,
+ _bfd_elf_single_rel_hdr (input_section)->sh_name));
+ BFD_ASSERT (section_name != NULL);
+ srel = bfd_get_linker_section (dynobj, section_name);
+ }
+ else
+ srel = NULL;
+
+ /* Find the gp value for this input bfd. */
+ gotobj = sw_64_elf_tdata (input_bfd)->gotobj;
+ if (gotobj)
+ {
+ sgot = sw_64_elf_tdata (gotobj)->got;
+ gp = _bfd_get_gp_value (gotobj);
+ if (gp == 0)
+ {
+ gp = (sgot->output_section->vma + sgot->output_offset + 0x8000);
+ _bfd_set_gp_value (gotobj, gp);
+ }
+ }
+ else
+ {
+ sgot = NULL;
+ gp = 0;
+ }
+
+ local_got_entries = sw_64_elf_tdata (input_bfd)->local_got_entries;
+
+ if (elf_hash_table (info)->tls_sec != NULL)
+ {
+ dtp_base = sw_64_get_dtprel_base (info);
+ tp_base = sw_64_get_tprel_base (info);
+ }
+ else
+ dtp_base = tp_base = 0;
+
+ relend = relocs + input_section->reloc_count;
+ for (rel = relocs; rel < relend; rel++)
+ {
+ struct sw_64_elf_link_hash_entry *h = NULL;
+ struct sw_64_elf_got_entry *gotent;
+ bfd_reloc_status_type r;
+ reloc_howto_type *howto;
+ unsigned long r_symndx;
+ Elf_Internal_Sym *sym = NULL;
+ asection *sec = NULL;
+ bfd_vma value;
+ bfd_vma addend;
+ bool dynamic_symbol_p;
+ bool unresolved_reloc = false;
+ bool undef_weak_ref = false;
+ unsigned long r_type;
+
+ r_type = ELF64_R_TYPE (rel->r_info);
+ if (r_type >= R_SW_64_max)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: unsupported relocation type %#x"), input_bfd,
+ (int) r_type);
+ bfd_set_error (bfd_error_bad_value);
+ ret_val = false;
+ continue;
+ }
+
+ howto = elf64_sw_64_howto_table + r_type;
+ r_symndx = ELF64_R_SYM (rel->r_info);
+
+ /* The symbol for a TLSLDM reloc is ignored. Collapse the
+ reloc to the STN_UNDEF (0) symbol so that they all match. */
+ if (r_type == R_SW_64_TLSLDM)
+ r_symndx = STN_UNDEF;
+
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ asection *msec;
+ sym = local_syms + r_symndx;
+ sec = local_sections[r_symndx];
+ msec = sec;
+ value = _bfd_elf_rela_local_sym (output_bfd, sym, &msec, rel);
+
+ /* If this is a tp-relative relocation against sym STN_UNDEF (0),
+ this is hackery from relax_section. Force the value to
+ be the tls module base. */
+ if (r_symndx == STN_UNDEF
+ && (r_type == R_SW_64_TLSLDM || r_type == R_SW_64_GOTTPREL
+ || r_type == R_SW_64_TPREL64 || r_type == R_SW_64_TPRELHI
+ || r_type == R_SW_64_TPRELLO || r_type == R_SW_64_TPREL16))
+ value = dtp_base;
+
+ if (local_got_entries)
+ gotent = local_got_entries[r_symndx];
+ else
+ gotent = NULL;
+
+ /* Need to adjust local GOT entries' addends for SEC_MERGE
+ unless it has been done already. */
+ if ((sec->flags & SEC_MERGE)
+ && ELF_ST_TYPE (sym->st_info) == STT_SECTION
+ && sec->sec_info_type == SEC_INFO_TYPE_MERGE && gotent
+ && !gotent->reloc_xlated)
+ {
+ struct sw_64_elf_got_entry *ent;
+
+ for (ent = gotent; ent; ent = ent->next)
+ {
+ ent->reloc_xlated = 1;
+ if (ent->use_count == 0)
+ continue;
+ msec = sec;
+ ent->addend = _bfd_merged_section_offset (
+ output_bfd, &msec, elf_section_data (sec)->sec_info,
+ sym->st_value + ent->addend);
+ ent->addend -= sym->st_value;
+ ent->addend += msec->output_section->vma + msec->output_offset
+ - sec->output_section->vma
+ - sec->output_offset;
+ }
+ }
+
+ dynamic_symbol_p = false;
+ }
+ else
+ {
+ bool warned, ignored;
+ struct elf_link_hash_entry *hh;
+ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (input_bfd);
+
+ RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
+ r_symndx, symtab_hdr, sym_hashes, hh, sec,
+ value, unresolved_reloc, warned, ignored);
+
+ if (warned)
+ continue;
+
+ if (value == 0 && !unresolved_reloc
+ && hh->root.type == bfd_link_hash_undefweak)
+ undef_weak_ref = true;
+
+ h = (struct sw_64_elf_link_hash_entry *) hh;
+ dynamic_symbol_p = sw_64_elf_dynamic_symbol_p (&h->root, info);
+ gotent = h->got_entries;
+ }
+
+ if (sec != NULL && discarded_section (sec))
+ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, rel, 1,
+ relend, howto, 0, contents);
+
+ addend = rel->r_addend;
+ value += addend;
+
+ /* Search for the proper got entry. */
+ for (; gotent; gotent = gotent->next)
+ if (gotent->gotobj == gotobj && gotent->reloc_type == r_type
+ && gotent->addend == addend)
+ break;
+
+ switch (r_type)
+ {
+ case R_SW_64_GPDISP: {
+ bfd_byte *p_ldih, *p_ldi;
+
+ BFD_ASSERT (gp != 0);
+
+ value = (input_section->output_section->vma
+ + input_section->output_offset + rel->r_offset);
+
+ p_ldih = contents + rel->r_offset;
+ p_ldi = p_ldih + rel->r_addend;
+
+ r = elf64_sw_64_do_reloc_gpdisp (input_bfd, gp - value, p_ldih,
+ p_ldi);
+ }
+ break;
+
+ case R_SW_64_LITERAL:
+ BFD_ASSERT (sgot != NULL);
+ BFD_ASSERT (gp != 0);
+ BFD_ASSERT (gotent != NULL);
+ BFD_ASSERT (gotent->use_count >= 1);
+
+ if (!gotent->reloc_done)
+ {
+ gotent->reloc_done = 1;
+
+ bfd_put_64 (output_bfd, value,
+ sgot->contents + gotent->got_offset);
+
+ /* If the symbol has been forced local, output a
+ RELATIVE reloc, otherwise it will be handled in
+ finish_dynamic_symbol. */
+ if (bfd_link_pic (info) && !dynamic_symbol_p && !undef_weak_ref)
+ elf64_sw_64_emit_dynrel (output_bfd, info, sgot, srelgot,
+ gotent->got_offset, 0,
+ R_SW_64_RELATIVE, value);
+ }
+
+ value = (sgot->output_section->vma + sgot->output_offset
+ + gotent->got_offset);
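+	  /* If the preceding reloc is the LITERAL_GOT marker, the GOT slot
+	     is addressed through an explicit ldih/ldl pair: fold the high
+	     part of the gp displacement into the ldih immediate here and
+	     let the default path below apply the remaining low 16 bits.  */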
+ Elf_Internal_Rela *rel_got;
+ rel_got = rel - 1;
+ if ((ELF64_R_TYPE (rel_got->r_info) == R_SW_64_LITERAL_GOT))
+ {
+ value -= gp;
+
+	      /* Emit the ldih $29, got_disp ($29).  */
+ bfd_byte *p_ldih;
+ p_ldih = contents + rel_got->r_offset;
+ bfd_vma got_disp;
+ bfd_vma got_low;
+ bfd_vma got_tmp;
+ unsigned long i_ldih;
+ i_ldih = bfd_get_32 (input_bfd, p_ldih);
+ got_disp = ((i_ldih & 0xffff) << 16);
+ got_disp = (got_disp ^ 0x80008000) - 0x80008000;
+
+ if ((int) value >= 0x8000 || (int) value < -0x8000)
+ {
+ got_low = (short) value;
+ got_tmp = value - got_low;
+ got_disp = got_disp + got_tmp;
+ value = (bfd_vma) got_low;
+ }
+ i_ldih
+ = ((i_ldih & 0xffff0000)
+ | (((got_disp >> 16) + ((got_disp >> 15) & 1)) & 0xffff));
+ bfd_put_32 (input_bfd, (bfd_vma) i_ldih, p_ldih);
+
+	      /* Emit the ldl $27, disp ($27).  */
+ goto default_reloc;
+ }
+ else
+ {
+ value -= gp;
+ goto default_reloc;
+ }
+
+ case R_SW_64_GPREL32:
+ case R_SW_64_GPREL16:
+ case R_SW_64_GPRELLOW:
+ if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: gp-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ BFD_ASSERT (gp != 0);
+ value -= gp;
+ goto default_reloc;
+
+ case R_SW_64_GPRELHIGH:
+ if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: gp-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ BFD_ASSERT (gp != 0);
+ value -= gp;
+ value = ((bfd_signed_vma) value >> 16) + ((value >> 15) & 1);
+ goto default_reloc;
+
+ case R_SW_64_HINT:
+ /* A call to a dynamic symbol is definitely out of range of
+ the 16-bit displacement. Don't bother writing anything. */
+ if (dynamic_symbol_p)
+ {
+ r = bfd_reloc_ok;
+ break;
+ }
+ /* The regular PC-relative stuff measures from the start of
+ the instruction rather than the end. */
+ value -= 4;
+ goto default_reloc;
+
+ case R_SW_64_BRADDR:
+ if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: pc-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ /* The regular PC-relative stuff measures from the start of
+ the instruction rather than the end. */
+ value -= 4;
+ goto default_reloc;
+
+ case R_SW_64_BR26ADDR:
+ if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: pc-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ /* The regular PC-relative stuff measures from the start of
+ the instruction rather than the end. */
+ value -= 4;
+ goto default_reloc;
+
+ case R_SW_64_BRSGP: {
+ int other;
+ const char *name;
+
+ /* The regular PC-relative stuff measures from the start of
+ the instruction rather than the end. */
+ value -= 4;
+
+ /* The source and destination gp must be the same. Note that
+ the source will always have an assigned gp, since we forced
+ one in check_relocs, but that the destination may not, as
+ it might not have had any relocations at all. Also take
+ care not to crash if H is an undefined symbol. */
+ if (h != NULL && sec != NULL && sw_64_elf_tdata (sec->owner)->gotobj
+ && gotobj != sw_64_elf_tdata (sec->owner)->gotobj)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: change in gp: BRSGP %s"), input_bfd,
+ h->root.root.root.string);
+ ret_val = false;
+ }
+
+ /* The symbol should be marked either NOPV or STD_GPLOAD. */
+ if (h != NULL)
+ other = h->root.other;
+ else
+ other = sym->st_other;
+ switch (other & STO_SW_64_STD_GPLOAD)
+ {
+ case STO_SW_64_NOPV:
+ break;
+ case STO_SW_64_STD_GPLOAD:
+ value += 8;
+ break;
+ default:
+ if (h != NULL)
+ name = h->root.root.root.string;
+ else
+ {
+ name
+ = (bfd_elf_string_from_elf_section (input_bfd,
+ symtab_hdr->sh_link,
+ sym->st_name));
+ if (name == NULL)
+ name = _ ("<unknown>");
+ else if (name[0] == 0)
+ name = bfd_section_name (sec);
+ }
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ (
+ "%pB: !samegp reloc against symbol without .prologue: %s"),
+ input_bfd, name);
+ ret_val = false;
+ break;
+ }
+
+ goto default_reloc;
+ }
+
+ case R_SW_64_REFLONG:
+ case R_SW_64_REFQUAD:
+ case R_SW_64_DTPREL64:
+ case R_SW_64_TPREL64: {
+ long dynindx, dyntype = r_type;
+ bfd_vma dynaddend;
+
+ /* Careful here to remember RELATIVE relocations for global
+ variables for symbolic shared objects. */
+
+ if (dynamic_symbol_p)
+ {
+ BFD_ASSERT (h->root.dynindx != -1);
+ dynindx = h->root.dynindx;
+ dynaddend = addend;
+ addend = 0, value = 0;
+ }
+ else if (r_type == R_SW_64_DTPREL64)
+ {
+ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
+ value -= dtp_base;
+ goto default_reloc;
+ }
+ else if (r_type == R_SW_64_TPREL64)
+ {
+ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
+ if (!bfd_link_dll (info))
+ {
+ value -= tp_base;
+ goto default_reloc;
+ }
+ dynindx = 0;
+ dynaddend = value - dtp_base;
+ }
+ else if (bfd_link_pic (info) && r_symndx != STN_UNDEF
+ && (input_section->flags & SEC_ALLOC) && !undef_weak_ref
+ && !(unresolved_reloc
+ && (_bfd_elf_section_offset (
+ output_bfd, info, input_section, rel->r_offset)
+ == (bfd_vma) -1)))
+ {
+ if (r_type == R_SW_64_REFLONG)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: unhandled dynamic relocation against %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ dynindx = 0;
+ dyntype = R_SW_64_RELATIVE;
+ dynaddend = value;
+ }
+ else
+ goto default_reloc;
+
+ if (input_section->flags & SEC_ALLOC)
+ elf64_sw_64_emit_dynrel (output_bfd, info, input_section, srel,
+ rel->r_offset, dynindx, dyntype,
+ dynaddend);
+ }
+ goto default_reloc;
+
+ case R_SW_64_SREL16:
+ case R_SW_64_SREL32:
+ case R_SW_64_SREL64:
+ if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: pc-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ else if (bfd_link_pic (info) && undef_weak_ref)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: pc-relative relocation against undefined weak symbol "
+ "%s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+
+ /* ??? .eh_frame references to discarded sections will be smashed
+ to relocations against SHN_UNDEF. The .eh_frame format allows
+ NULL to be encoded as 0 in any format, so this works here. */
+ if (r_symndx == STN_UNDEF
+ || (unresolved_reloc
+ && _bfd_elf_section_offset (output_bfd, info, input_section,
+ rel->r_offset)
+ == (bfd_vma) -1))
+ howto = (elf64_sw_64_howto_table
+ + (r_type - R_SW_64_SREL32 + R_SW_64_REFLONG));
+ goto default_reloc;
+
+ case R_SW_64_TLSLDM:
+ /* Ignore the symbol for the relocation. The result is always
+ the current module. */
+ dynamic_symbol_p = 0;
+ /* FALLTHRU */
+
+ case R_SW_64_TLSGD:
+ if (!gotent->reloc_done)
+ {
+ gotent->reloc_done = 1;
+
+ /* Note that the module index for the main program is 1. */
+ bfd_put_64 (output_bfd, !bfd_link_pic (info) && !dynamic_symbol_p,
+ sgot->contents + gotent->got_offset);
+
+ /* If the symbol has been forced local, output a
+ DTPMOD64 reloc, otherwise it will be handled in
+ finish_dynamic_symbol. */
+ if (bfd_link_pic (info) && !dynamic_symbol_p)
+ elf64_sw_64_emit_dynrel (output_bfd, info, sgot, srelgot,
+ gotent->got_offset, 0,
+ R_SW_64_DTPMOD64, 0);
+
+ if (dynamic_symbol_p || r_type == R_SW_64_TLSLDM)
+ value = 0;
+ else
+ {
+ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
+ value -= dtp_base;
+ }
+ bfd_put_64 (output_bfd, value,
+ sgot->contents + gotent->got_offset + 8);
+ }
+
+ value = (sgot->output_section->vma + sgot->output_offset
+ + gotent->got_offset);
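+	  /* Same GOT-displacement splitting as for LITERAL/LITERAL_GOT
+	     above, keyed off the TLSREL_GOT marker: patch the high part
+	     into the paired ldih and fall through with the low 16 bits.  */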
+ rel_got = rel - 1;
+ if ((ELF64_R_TYPE (rel_got->r_info) == R_SW_64_TLSREL_GOT))
+ {
+ value -= gp;
+	      /* Emit the ldih $29, got_disp ($29).  */
+ bfd_byte *p_ldih;
+ p_ldih = contents + rel_got->r_offset;
+ bfd_vma got_disp;
+ bfd_vma got_low;
+ bfd_vma got_tmp;
+ unsigned long i_ldih;
+ i_ldih = bfd_get_32 (input_bfd, p_ldih);
+ got_disp = ((i_ldih & 0xffff) << 16);
+ got_disp = (got_disp ^ 0x80008000) - 0x80008000;
+
+ if ((int) value >= 0x8000 || (int) value < -0x8000)
+ {
+ got_low = (short) value;
+ got_tmp = value - got_low;
+ got_disp = got_disp + got_tmp;
+ value = (bfd_vma) got_low;
+ }
+ i_ldih
+ = ((i_ldih & 0xffff0000)
+ | (((got_disp >> 16) + ((got_disp >> 15) & 1)) & 0xffff));
+ bfd_put_32 (input_bfd, (bfd_vma) i_ldih, p_ldih);
+
+	      /* Emit the ldl $27, disp ($27).  */
+ goto default_reloc;
+ }
+ else
+ {
+ value -= gp;
+ goto default_reloc;
+ }
+
+ case R_SW_64_DTPRELHI:
+ case R_SW_64_DTPRELLO:
+ case R_SW_64_DTPREL16:
+ if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: dtp-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
+ value -= dtp_base;
+ if (r_type == R_SW_64_DTPRELHI)
+ value = ((bfd_signed_vma) value >> 16) + ((value >> 15) & 1);
+ goto default_reloc;
+
+ case R_SW_64_TPRELHI:
+ case R_SW_64_TPRELLO:
+ case R_SW_64_TPREL16:
+ if (bfd_link_dll (info))
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: TLS local exec code cannot be linked into shared "
+ "objects"),
+ input_bfd);
+ ret_val = false;
+ }
+ else if (dynamic_symbol_p)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_ ("%pB: tp-relative relocation against dynamic symbol %s"),
+ input_bfd, h->root.root.root.string);
+ ret_val = false;
+ }
+ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
+ value -= tp_base;
+ if (r_type == R_SW_64_TPRELHI)
+ value = ((bfd_signed_vma) value >> 16) + ((value >> 15) & 1);
+ goto default_reloc;
+
+ case R_SW_64_GOTDTPREL:
+ case R_SW_64_GOTTPREL:
+ BFD_ASSERT (sgot != NULL);
+ BFD_ASSERT (gp != 0);
+ BFD_ASSERT (gotent != NULL);
+ BFD_ASSERT (gotent->use_count >= 1);
+
+ if (!gotent->reloc_done)
+ {
+ gotent->reloc_done = 1;
+
+ if (dynamic_symbol_p)
+ value = 0;
+ else
+ {
+ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
+ if (r_type == R_SW_64_GOTDTPREL)
+ value -= dtp_base;
+ else if (bfd_link_executable (info))
+ value -= tp_base;
+ else
+ {
+ elf64_sw_64_emit_dynrel (output_bfd, info, sgot, srelgot,
+ gotent->got_offset, 0,
+ R_SW_64_TPREL64,
+ value - dtp_base);
+ value = 0;
+ }
+ }
+ bfd_put_64 (output_bfd, value,
+ sgot->contents + gotent->got_offset);
+ }
+
+ value = (sgot->output_section->vma + sgot->output_offset
+ + gotent->got_offset);
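+	  /* As above: split the GOT displacement between the paired ldih
+	     and the low 16 bits applied by the default path.  */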
+ rel_got = rel - 1;
+ if ((ELF64_R_TYPE (rel_got->r_info) == R_SW_64_TLSREL_GOT))
+ {
+ value -= gp;
+	      /* Emit the ldih $29, got_disp ($29).  */
+ bfd_byte *p_ldih;
+ p_ldih = contents + rel_got->r_offset;
+ bfd_vma got_disp;
+ bfd_vma got_low;
+ bfd_vma got_tmp;
+ unsigned long i_ldih;
+ i_ldih = bfd_get_32 (input_bfd, p_ldih);
+ got_disp = ((i_ldih & 0xffff) << 16);
+ got_disp = (got_disp ^ 0x80008000) - 0x80008000;
+
+ if ((int) value >= 0x8000 || (int) value < -0x8000)
+ {
+ got_low = (short) value;
+ got_tmp = value - got_low;
+ got_disp = got_disp + got_tmp;
+ value = (bfd_vma) got_low;
+ }
+ i_ldih
+ = ((i_ldih & 0xffff0000)
+ | (((got_disp >> 16) + ((got_disp >> 15) & 1)) & 0xffff));
+ bfd_put_32 (input_bfd, (bfd_vma) i_ldih, p_ldih);
+
+	      /* Emit the ldl $27, disp ($27).  */
+ goto default_reloc;
+ }
+ else
+ {
+ value -= gp;
+ goto default_reloc;
+ }
+
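+	  /* The *_GOT marker relocations need no action of their own; they
+	     are consumed when the reloc that follows them is processed.  */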
+ case R_SW_64_TLSREL_GOT:
+ r = bfd_reloc_ok;
+ break;
+
+ case R_SW_64_LITERAL_GOT:
+ r = bfd_reloc_ok;
+ break;
+
+ default:
+ default_reloc:
+ r = _bfd_final_link_relocate (howto, input_bfd, input_section,
+ contents, rel->r_offset, value, 0);
+ break;
+ }
+
+ switch (r)
+ {
+ case bfd_reloc_ok:
+ break;
+
+ case bfd_reloc_overflow: {
+ const char *name;
+
+ /* Don't warn if the overflow is due to pc relative reloc
+ against discarded section. Section optimization code should
+ handle it. */
+
+ if (r_symndx < symtab_hdr->sh_info && sec != NULL
+ && howto->pc_relative && discarded_section (sec))
+ break;
+
+ if (h != NULL)
+ name = NULL;
+ else
+ {
+ name = (bfd_elf_string_from_elf_section (input_bfd,
+ symtab_hdr->sh_link,
+ sym->st_name));
+ if (name == NULL)
+ return false;
+ if (*name == '\0')
+ name = bfd_section_name (sec);
+ }
+ (*info->callbacks->reloc_overflow) (info,
+ (h ? &h->root.root : NULL),
+ name, howto->name, (bfd_vma) 0,
+ input_bfd, input_section,
+ rel->r_offset);
+ }
+ break;
+
+ default:
+ case bfd_reloc_outofrange:
+ abort ();
+ }
+ }
+
+ return ret_val;
+}
+
+/* Finish up dynamic symbol handling. We set the contents of various
+ dynamic sections here. */
+
+static bool
+elf64_sw_64_finish_dynamic_symbol (bfd *output_bfd, struct bfd_link_info *info,
+ struct elf_link_hash_entry *h,
+ Elf_Internal_Sym *sym)
+{
+ struct sw_64_elf_link_hash_entry *ah = (struct sw_64_elf_link_hash_entry *) h;
+
+ if (h->needs_plt)
+ {
+ /* Fill in the .plt entry for this symbol. */
+ asection *splt, *sgot, *srel;
+ Elf_Internal_Rela outrel;
+ bfd_byte *loc;
+ bfd_vma got_addr, plt_addr;
+ bfd_vma plt_index;
+ struct sw_64_elf_got_entry *gotent;
+
+ BFD_ASSERT (h->dynindx != -1);
+
+ splt = elf_hash_table (info)->splt;
+ BFD_ASSERT (splt != NULL);
+ srel = elf_hash_table (info)->srelplt;
+ BFD_ASSERT (srel != NULL);
+
+ for (gotent = ah->got_entries; gotent; gotent = gotent->next)
+ if (gotent->reloc_type == R_SW_64_LITERAL && gotent->use_count > 0)
+ {
+ unsigned int insn;
+ int disp;
+
+ sgot = sw_64_elf_tdata (gotent->gotobj)->got;
+ BFD_ASSERT (sgot != NULL);
+
+ BFD_ASSERT (gotent->got_offset != -1);
+ BFD_ASSERT (gotent->plt_offset != -1);
+
+ got_addr = (sgot->output_section->vma + sgot->output_offset
+ + gotent->got_offset);
+ plt_addr = (splt->output_section->vma + splt->output_offset
+ + gotent->plt_offset);
+
+ plt_index = (gotent->plt_offset - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
+
+ /* Fill in the entry in the procedure linkage table. */
+ if (elf64_sw_64_use_secureplt)
+ {
+ disp = (PLT_HEADER_SIZE - 4) - (gotent->plt_offset + 4);
+ insn = INSN_AD (INSN_BR, 31, disp);
+ bfd_put_32 (output_bfd, insn,
+ splt->contents + gotent->plt_offset);
+
+ plt_index = ((gotent->plt_offset - NEW_PLT_HEADER_SIZE)
+ / NEW_PLT_ENTRY_SIZE);
+ }
+ else
+ {
+ disp = -(gotent->plt_offset + 4);
+ insn = INSN_AD (INSN_BR, 28, disp);
+ bfd_put_32 (output_bfd, insn,
+ splt->contents + gotent->plt_offset);
+ bfd_put_32 (output_bfd, INSN_UNOP,
+ splt->contents + gotent->plt_offset + 4);
+ bfd_put_32 (output_bfd, INSN_UNOP,
+ splt->contents + gotent->plt_offset + 8);
+
+ plt_index = ((gotent->plt_offset - OLD_PLT_HEADER_SIZE)
+ / OLD_PLT_ENTRY_SIZE);
+ }
+
+ /* Fill in the entry in the .rela.plt section. */
+ outrel.r_offset = got_addr;
+ outrel.r_info = ELF64_R_INFO (h->dynindx, R_SW_64_JMP_SLOT);
+ outrel.r_addend = 0;
+
+ loc = srel->contents + plt_index * sizeof (Elf64_External_Rela);
+ bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
+
+ /* Fill in the entry in the .got. */
+ bfd_put_64 (output_bfd, plt_addr,
+ sgot->contents + gotent->got_offset);
+ }
+ }
+ else if (sw_64_elf_dynamic_symbol_p (h, info))
+ {
+ /* Fill in the dynamic relocations for this symbol's .got entries. */
+ asection *srel;
+ struct sw_64_elf_got_entry *gotent;
+
+ srel = elf_hash_table (info)->srelgot;
+ BFD_ASSERT (srel != NULL);
+
+ for (gotent = ((struct sw_64_elf_link_hash_entry *) h)->got_entries;
+ gotent != NULL; gotent = gotent->next)
+ {
+ asection *sgot;
+ long r_type;
+
+ if (gotent->use_count == 0)
+ continue;
+
+ sgot = sw_64_elf_tdata (gotent->gotobj)->got;
+
+ r_type = gotent->reloc_type;
+ switch (r_type)
+ {
+ case R_SW_64_LITERAL:
+ r_type = R_SW_64_GLOB_DAT;
+ break;
+ case R_SW_64_TLSGD:
+ r_type = R_SW_64_DTPMOD64;
+ break;
+ case R_SW_64_GOTDTPREL:
+ r_type = R_SW_64_DTPREL64;
+ break;
+ case R_SW_64_GOTTPREL:
+ r_type = R_SW_64_TPREL64;
+ break;
+ case R_SW_64_TLSLDM:
+ default:
+ abort ();
+ }
+
+ elf64_sw_64_emit_dynrel (output_bfd, info, sgot, srel,
+ gotent->got_offset, h->dynindx, r_type,
+ gotent->addend);
+
+ if (gotent->reloc_type == R_SW_64_TLSGD)
+ elf64_sw_64_emit_dynrel (output_bfd, info, sgot, srel,
+ gotent->got_offset + 8, h->dynindx,
+ R_SW_64_DTPREL64, gotent->addend);
+ }
+ }
+
+ /* Mark some specially defined symbols as absolute. */
+ if (h == elf_hash_table (info)->hdynamic || h == elf_hash_table (info)->hgot
+ || h == elf_hash_table (info)->hplt)
+ sym->st_shndx = SHN_ABS;
+
+ return true;
+}
+
+/* Finish up the dynamic sections. */
+
+static bool
+elf64_sw_64_finish_dynamic_sections (bfd *output_bfd,
+ struct bfd_link_info *info)
+{
+ bfd *dynobj;
+ asection *sdyn;
+
+ dynobj = elf_hash_table (info)->dynobj;
+ sdyn = bfd_get_linker_section (dynobj, ".dynamic");
+
+ if (elf_hash_table (info)->dynamic_sections_created)
+ {
+ asection *splt, *sgotplt, *srelaplt;
+ Elf64_External_Dyn *dyncon, *dynconend;
+ bfd_vma plt_vma, gotplt_vma;
+
+ splt = elf_hash_table (info)->splt;
+ srelaplt = elf_hash_table (info)->srelplt;
+ BFD_ASSERT (splt != NULL && sdyn != NULL);
+
+ plt_vma = splt->output_section->vma + splt->output_offset;
+
+ gotplt_vma = 0;
+ if (elf64_sw_64_use_secureplt)
+ {
+ sgotplt = elf_hash_table (info)->sgotplt;
+ BFD_ASSERT (sgotplt != NULL);
+ if (sgotplt->size > 0)
+ gotplt_vma = sgotplt->output_section->vma + sgotplt->output_offset;
+ }
+
+ dyncon = (Elf64_External_Dyn *) sdyn->contents;
+ dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
+ for (; dyncon < dynconend; dyncon++)
+ {
+ Elf_Internal_Dyn dyn;
+
+ bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
+
+ switch (dyn.d_tag)
+ {
+ case DT_PLTGOT:
+ dyn.d_un.d_ptr = elf64_sw_64_use_secureplt ? gotplt_vma : plt_vma;
+ break;
+ case DT_PLTRELSZ:
+ dyn.d_un.d_val = srelaplt ? srelaplt->size : 0;
+ break;
+ case DT_JMPREL:
+ dyn.d_un.d_ptr
+ = srelaplt
+ ? (srelaplt->output_section->vma + srelaplt->output_offset)
+ : 0;
+ break;
+ }
+
+ bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
+ }
+
+ /* Initialize the plt header. */
+ if (splt->size > 0)
+ {
+ unsigned int insn;
+ int ofs;
+
+ if (elf64_sw_64_use_secureplt)
+ {
+ ofs = gotplt_vma - (plt_vma + PLT_HEADER_SIZE);
+
+ insn = INSN_ABC (INSN_SUBL, 27, 28, 25);
+ bfd_put_32 (output_bfd, insn, splt->contents);
+
+ insn = INSN_ABO (INSN_LDIH, 28, 28, (ofs + 0x8000) >> 16);
+ bfd_put_32 (output_bfd, insn, splt->contents + 4);
+
+ insn = INSN_ABC (INSN_S4SUBL, 25, 25, 25);
+ bfd_put_32 (output_bfd, insn, splt->contents + 8);
+
+ insn = INSN_ABO (INSN_LDI, 28, 28, ofs);
+ bfd_put_32 (output_bfd, insn, splt->contents + 12);
+
+ insn = INSN_ABO (INSN_LDL, 27, 28, 0);
+ bfd_put_32 (output_bfd, insn, splt->contents + 16);
+
+ insn = INSN_ABC (INSN_ADDL, 25, 25, 25);
+ bfd_put_32 (output_bfd, insn, splt->contents + 20);
+
+ insn = INSN_ABO (INSN_LDL, 28, 28, 8);
+ bfd_put_32 (output_bfd, insn, splt->contents + 24);
+
+ insn = INSN_AB (INSN_JMP, 31, 27);
+ bfd_put_32 (output_bfd, insn, splt->contents + 28);
+
+ insn = INSN_AD (INSN_BR, 28, -PLT_HEADER_SIZE);
+ bfd_put_32 (output_bfd, insn, splt->contents + 32);
+ }
+ else
+ {
+ insn = INSN_AD (INSN_BR, 27, 0); /* br $27, .+4 */
+ bfd_put_32 (output_bfd, insn, splt->contents);
+
+ insn = INSN_ABO (INSN_LDL, 27, 27, 12);
+ bfd_put_32 (output_bfd, insn, splt->contents + 4);
+
+ insn = INSN_UNOP;
+ bfd_put_32 (output_bfd, insn, splt->contents + 8);
+
+ insn = INSN_AB (INSN_JMP, 27, 27);
+ bfd_put_32 (output_bfd, insn, splt->contents + 12);
+
+ /* The next two words will be filled in by ld.so. */
+ bfd_put_64 (output_bfd, 0, splt->contents + 16);
+ bfd_put_64 (output_bfd, 0, splt->contents + 24);
+ }
+
+ elf_section_data (splt->output_section)->this_hdr.sh_entsize = 0;
+ }
+ }
+
+ return true;
+}
+
+/* We need to use a special link routine to handle the .mdebug section.
+ We need to merge all instances of these sections together, not write
+ them all out sequentially. */
+
+static bool
+elf64_sw_64_final_link (bfd *abfd, struct bfd_link_info *info)
+{
+ asection *o;
+ struct bfd_link_order *p;
+ asection *mdebug_sec;
+ struct ecoff_debug_info debug;
+ const struct ecoff_debug_swap *swap
+ = get_elf_backend_data (abfd)->elf_backend_ecoff_debug_swap;
+ HDRR *symhdr = &debug.symbolic_header;
+ void *mdebug_handle = NULL;
+ struct sw_64_elf_link_hash_table *htab;
+
+ htab = sw_64_elf_hash_table (info);
+ if (htab == NULL)
+ return false;
+
+ /* Go through the sections and collect the mdebug information. */
+ mdebug_sec = NULL;
+ for (o = abfd->sections; o != (asection *) NULL; o = o->next)
+ {
+ if (strcmp (o->name, ".mdebug") == 0)
+ {
+ struct extsym_info einfo;
+
+ /* We have found the .mdebug section in the output file.
+ Look through all the link_orders comprising it and merge
+ the information together. */
+ symhdr->magic = swap->sym_magic;
+ /* FIXME: What should the version stamp be? */
+ symhdr->vstamp = 0;
+ symhdr->ilineMax = 0;
+ symhdr->cbLine = 0;
+ symhdr->idnMax = 0;
+ symhdr->ipdMax = 0;
+ symhdr->isymMax = 0;
+ symhdr->ioptMax = 0;
+ symhdr->iauxMax = 0;
+ symhdr->issMax = 0;
+ symhdr->issExtMax = 0;
+ symhdr->ifdMax = 0;
+ symhdr->crfd = 0;
+ symhdr->iextMax = 0;
+
+ /* We accumulate the debugging information itself in the
+ debug_info structure. */
+ debug.line = NULL;
+ debug.external_dnr = NULL;
+ debug.external_pdr = NULL;
+ debug.external_sym = NULL;
+ debug.external_opt = NULL;
+ debug.external_aux = NULL;
+ debug.ss = NULL;
+ debug.ssext = debug.ssext_end = NULL;
+ debug.external_fdr = NULL;
+ debug.external_rfd = NULL;
+ debug.external_ext = debug.external_ext_end = NULL;
+
+ mdebug_handle = bfd_ecoff_debug_init (abfd, &debug, swap, info);
+ if (mdebug_handle == NULL)
+ return false;
+
+ if (1)
+ {
+ asection *s;
+ EXTR esym;
+ bfd_vma last = 0;
+ unsigned int i;
+ static const char *const name[]
+ = {".text", ".init", ".fini", ".data",
+ ".rodata", ".sdata", ".sbss", ".bss"};
+ static const int sc[] = {scText, scInit, scFini, scData,
+ scRData, scSData, scSBss, scBss};
+
+ esym.jmptbl = 0;
+ esym.cobol_main = 0;
+ esym.weakext = 0;
+ esym.reserved = 0;
+ esym.ifd = ifdNil;
+ esym.asym.iss = issNil;
+ esym.asym.st = stLocal;
+ esym.asym.reserved = 0;
+ esym.asym.index = indexNil;
+ for (i = 0; i < 8; i++)
+ {
+ esym.asym.sc = sc[i];
+ s = bfd_get_section_by_name (abfd, name[i]);
+ if (s != NULL)
+ {
+ esym.asym.value = s->vma;
+ last = s->vma + s->size;
+ }
+ else
+ esym.asym.value = last;
+
+ if (!bfd_ecoff_debug_one_external (abfd, &debug, swap,
+ name[i], &esym))
+ return false;
+ }
+ }
+
+ for (p = o->map_head.link_order; p != (struct bfd_link_order *) NULL;
+ p = p->next)
+ {
+ asection *input_section;
+ bfd *input_bfd;
+ const struct ecoff_debug_swap *input_swap;
+ struct ecoff_debug_info input_debug;
+ char *eraw_src;
+ char *eraw_end;
+
+ if (p->type != bfd_indirect_link_order)
+ {
+ if (p->type == bfd_data_link_order)
+ continue;
+ abort ();
+ }
+
+ input_section = p->u.indirect.section;
+ input_bfd = input_section->owner;
+
+ if (!is_sw_64_elf (input_bfd))
+            /* I don't know what a non-SW_64 ELF bfd would be
+ doing with a .mdebug section, but I don't really
+ want to deal with it. */
+ continue;
+
+ input_swap = (get_elf_backend_data (input_bfd)
+ ->elf_backend_ecoff_debug_swap);
+
+ BFD_ASSERT (p->size == input_section->size);
+
+ /* The ECOFF linking code expects that we have already
+ read in the debugging information and set up an
+ ecoff_debug_info structure, so we do that now. */
+ if (!elf64_sw_64_read_ecoff_info (input_bfd, input_section,
+ &input_debug))
+ return false;
+
+ if (!(bfd_ecoff_debug_accumulate (mdebug_handle, abfd, &debug,
+ swap, input_bfd, &input_debug,
+ input_swap, info)))
+ return false;
+
+ /* Loop through the external symbols. For each one with
+ interesting information, try to find the symbol in
+ the linker global hash table and save the information
+ for the output external symbols. */
+ eraw_src = (char *) input_debug.external_ext;
+ eraw_end = (eraw_src
+ + (input_debug.symbolic_header.iextMax
+ * input_swap->external_ext_size));
+ for (; eraw_src < eraw_end;
+ eraw_src += input_swap->external_ext_size)
+ {
+ EXTR ext;
+ const char *name;
+ struct sw_64_elf_link_hash_entry *h;
+
+ (*input_swap->swap_ext_in) (input_bfd, eraw_src, &ext);
+ if (ext.asym.sc == scNil || ext.asym.sc == scUndefined
+ || ext.asym.sc == scSUndefined)
+ continue;
+
+ name = input_debug.ssext + ext.asym.iss;
+ h = sw_64_elf_link_hash_lookup (htab, name, false, false,
+ true);
+ if (h == NULL || h->esym.ifd != -2)
+ continue;
+
+ if (ext.ifd != -1)
+ {
+ BFD_ASSERT (ext.ifd < input_debug.symbolic_header.ifdMax);
+ ext.ifd = input_debug.ifdmap[ext.ifd];
+ }
+
+ h->esym = ext;
+ }
+
+ /* Free up the information we just read. */
+ free (input_debug.line);
+ free (input_debug.external_dnr);
+ free (input_debug.external_pdr);
+ free (input_debug.external_sym);
+ free (input_debug.external_opt);
+ free (input_debug.external_aux);
+ free (input_debug.ss);
+ free (input_debug.ssext);
+ free (input_debug.external_fdr);
+ free (input_debug.external_rfd);
+ free (input_debug.external_ext);
+
+ /* Hack: reset the SEC_HAS_CONTENTS flag so that
+ elf_link_input_bfd ignores this section. */
+ input_section->flags &= ~SEC_HAS_CONTENTS;
+ }
+
+ /* Build the external symbol information. */
+ einfo.abfd = abfd;
+ einfo.info = info;
+ einfo.debug = &debug;
+ einfo.swap = swap;
+ einfo.failed = false;
+ elf_link_hash_traverse (elf_hash_table (info),
+ elf64_sw_64_output_extsym, &einfo);
+ if (einfo.failed)
+ return false;
+
+ /* Set the size of the .mdebug section. */
+ o->size = bfd_ecoff_debug_size (abfd, &debug, swap);
+
+ /* Skip this section later on (I don't think this currently
+ matters, but someday it might). */
+ o->map_head.link_order = (struct bfd_link_order *) NULL;
+
+ mdebug_sec = o;
+ }
+ }
+
+ /* Invoke the regular ELF backend linker to do all the work. */
+ if (!bfd_elf_final_link (abfd, info))
+ return false;
+
+ /* Now write out the computed sections. */
+
+ /* The .got subsections... */
+ {
+ bfd *i, *dynobj = elf_hash_table (info)->dynobj;
+ for (i = htab->got_list; i != NULL; i = sw_64_elf_tdata (i)->got_link_next)
+ {
+ asection *sgot;
+
+ /* elf_bfd_final_link already did everything in dynobj. */
+ if (i == dynobj)
+ continue;
+
+ sgot = sw_64_elf_tdata (i)->got;
+ if (!bfd_set_section_contents (abfd, sgot->output_section,
+ sgot->contents,
+ (file_ptr) sgot->output_offset,
+ sgot->size))
+ return false;
+ }
+ }
+
+ if (mdebug_sec != (asection *) NULL)
+ {
+ BFD_ASSERT (abfd->output_has_begun);
+ if (!bfd_ecoff_write_accumulated_debug (mdebug_handle, abfd, &debug, swap,
+ info, mdebug_sec->filepos))
+ return false;
+
+ bfd_ecoff_debug_free (mdebug_handle, abfd, &debug, swap, info);
+ }
+
+ return true;
+}
+
+static enum elf_reloc_type_class
+elf64_sw_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ const asection *rel_sec ATTRIBUTE_UNUSED,
+ const Elf_Internal_Rela *rela)
+{
+ switch ((int) ELF64_R_TYPE (rela->r_info))
+ {
+ case R_SW_64_RELATIVE:
+ return reloc_class_relative;
+ case R_SW_64_JMP_SLOT:
+ return reloc_class_plt;
+ case R_SW_64_COPY:
+ return reloc_class_copy;
+ default:
+ return reloc_class_normal;
+ }
+}
+
+static const struct bfd_elf_special_section elf64_sw_64_special_sections[]
+ = {{STRING_COMMA_LEN (".sbss"), -2, SHT_NOBITS,
+ SHF_ALLOC + SHF_WRITE + SHF_SW_64_GPREL},
+ {STRING_COMMA_LEN (".sdata"), -2, SHT_PROGBITS,
+ SHF_ALLOC + SHF_WRITE + SHF_SW_64_GPREL},
+ {NULL, 0, 0, 0, 0}};
+
+/* ECOFF swapping routines. These are used when dealing with the
+ .mdebug section, which is in the ECOFF debugging format. Copied
+ from elf32-mips.c. */
+static const struct ecoff_debug_swap elf64_sw_64_ecoff_debug_swap = {
+ /* Symbol table magic number. */
+ magicSym2,
+ /* Alignment of debugging information. E.g., 4. */
+ 8,
+ /* Sizes of external symbolic information. */
+ sizeof (struct hdr_ext), sizeof (struct dnr_ext), sizeof (struct pdr_ext),
+ sizeof (struct sym_ext), sizeof (struct opt_ext), sizeof (struct fdr_ext),
+ sizeof (struct rfd_ext), sizeof (struct ext_ext),
+ /* Functions to swap in external symbolic data. */
+ ecoff_swap_hdr_in, ecoff_swap_dnr_in, ecoff_swap_pdr_in, ecoff_swap_sym_in,
+ ecoff_swap_opt_in, ecoff_swap_fdr_in, ecoff_swap_rfd_in, ecoff_swap_ext_in,
+ _bfd_ecoff_swap_tir_in, _bfd_ecoff_swap_rndx_in,
+ /* Functions to swap out external symbolic data. */
+ ecoff_swap_hdr_out, ecoff_swap_dnr_out, ecoff_swap_pdr_out,
+ ecoff_swap_sym_out, ecoff_swap_opt_out, ecoff_swap_fdr_out,
+ ecoff_swap_rfd_out, ecoff_swap_ext_out, _bfd_ecoff_swap_tir_out,
+ _bfd_ecoff_swap_rndx_out,
+ /* Function to read in symbolic data. */
+ elf64_sw_64_read_ecoff_info};
+
+/* Use a non-standard hash bucket size of 8. */
+
+static const struct elf_size_info sw_64_elf_size_info
+ = {sizeof (Elf64_External_Ehdr),
+ sizeof (Elf64_External_Phdr),
+ sizeof (Elf64_External_Shdr),
+ sizeof (Elf64_External_Rel),
+ sizeof (Elf64_External_Rela),
+ sizeof (Elf64_External_Sym),
+ sizeof (Elf64_External_Dyn),
+ sizeof (Elf_External_Note),
+ 8,
+ 1,
+ 64,
+ 3,
+ ELFCLASS64,
+ EV_CURRENT,
+ bfd_elf64_write_out_phdrs,
+ bfd_elf64_write_shdrs_and_ehdr,
+ bfd_elf64_checksum_contents,
+ bfd_elf64_write_relocs,
+ bfd_elf64_swap_symbol_in,
+ bfd_elf64_swap_symbol_out,
+ bfd_elf64_slurp_reloc_table,
+ bfd_elf64_slurp_symbol_table,
+ bfd_elf64_swap_dyn_in,
+ bfd_elf64_swap_dyn_out,
+ bfd_elf64_swap_reloc_in,
+ bfd_elf64_swap_reloc_out,
+ bfd_elf64_swap_reloca_in,
+ bfd_elf64_swap_reloca_out};
+
+#define TARGET_LITTLE_SYM sw_64_elf64_vec
+#define TARGET_LITTLE_NAME "elf64-sw_64"
+#define ELF_ARCH bfd_arch_sw_64
+#define ELF_TARGET_ID SW_64_ELF_DATA
+#define ELF_MACHINE_CODE EM_SW_64
+#define ELF_MAXPAGESIZE 0x10000
+#define ELF_COMMONPAGESIZE 0x2000
+
+#define bfd_elf64_bfd_link_hash_table_create \
+ elf64_sw_64_bfd_link_hash_table_create
+
+#define bfd_elf64_bfd_reloc_type_lookup elf64_sw_64_bfd_reloc_type_lookup
+#define bfd_elf64_bfd_reloc_name_lookup elf64_sw_64_bfd_reloc_name_lookup
+#define elf_info_to_howto elf64_sw_64_info_to_howto
+
+#define bfd_elf64_mkobject elf64_sw_64_mkobject
+#define elf_backend_object_p elf64_sw_64_object_p
+
+#define elf_backend_section_from_shdr elf64_sw_64_section_from_shdr
+#define elf_backend_section_flags elf64_sw_64_section_flags
+#define elf_backend_fake_sections elf64_sw_64_fake_sections
+
+#define bfd_elf64_bfd_is_local_label_name elf64_sw_64_is_local_label_name
+#define bfd_elf64_find_nearest_line elf64_sw_64_find_nearest_line
+#define bfd_elf64_bfd_relax_section elf64_sw_64_relax_section
+
+#define elf_backend_add_symbol_hook elf64_sw_64_add_symbol_hook
+#define elf_backend_relocs_compatible _bfd_elf_relocs_compatible
+#define elf_backend_sort_relocs_p elf64_sw_64_sort_relocs_p
+#define elf_backend_check_relocs elf64_sw_64_check_relocs
+#define elf_backend_create_dynamic_sections elf64_sw_64_create_dynamic_sections
+#define elf_backend_adjust_dynamic_symbol elf64_sw_64_adjust_dynamic_symbol
+#define elf_backend_merge_symbol_attribute elf64_sw_64_merge_symbol_attribute
+#define elf_backend_copy_indirect_symbol elf64_sw_64_copy_indirect_symbol
+#define elf_backend_always_size_sections elf64_sw_64_always_size_sections
+#define elf_backend_size_dynamic_sections elf64_sw_64_size_dynamic_sections
+#define elf_backend_omit_section_dynsym _bfd_elf_omit_section_dynsym_all
+#define elf_backend_relocate_section elf64_sw_64_relocate_section
+#define elf_backend_finish_dynamic_symbol elf64_sw_64_finish_dynamic_symbol
+#define elf_backend_finish_dynamic_sections elf64_sw_64_finish_dynamic_sections
+#define bfd_elf64_bfd_final_link elf64_sw_64_final_link
+#define elf_backend_reloc_type_class elf64_sw_64_reloc_type_class
+
+#define elf_backend_can_gc_sections 1
+#define elf_backend_gc_mark_hook elf64_sw_64_gc_mark_hook
+
+#define elf_backend_ecoff_debug_swap &elf64_sw_64_ecoff_debug_swap
+
+#define elf_backend_size_info sw_64_elf_size_info
+
+#define elf_backend_special_sections elf64_sw_64_special_sections
+
+#define elf_backend_strip_zero_sized_dynamic_sections \
+ _bfd_elf_strip_zero_sized_dynamic_sections
+
+/* A few constants that determine how the .plt section is set up. */
+#define elf_backend_want_got_plt 0
+#define elf_backend_plt_readonly 0
+#define elf_backend_want_plt_sym 1
+#define elf_backend_got_header_size 0
+#define elf_backend_dtrel_excludes_plt 1
+
+#include "elf64-target.h"
+
+/* FreeBSD support. */
+
+#undef TARGET_LITTLE_SYM
+#define TARGET_LITTLE_SYM sw_64_elf64_fbsd_vec
+#undef TARGET_LITTLE_NAME
+#define TARGET_LITTLE_NAME "elf64-sw_64-freebsd"
+#undef ELF_OSABI
+#define ELF_OSABI ELFOSABI_FREEBSD
+
+/* The kernel recognizes executables as valid only if they carry a
+ "FreeBSD" label in the ELF header. So we put this label on all
+ executables and (for simplicity) also all other object files. */
+
+static bool
+elf64_sw_64_fbsd_init_file_header (bfd *abfd, struct bfd_link_info *info)
+{
+ Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
+
+ if (!_bfd_elf_init_file_header (abfd, info))
+ return false;
+
+ i_ehdrp = elf_elfheader (abfd);
+
+ /* Put an ABI label supported by FreeBSD >= 4.1. */
+ i_ehdrp->e_ident[EI_OSABI] = get_elf_backend_data (abfd)->elf_osabi;
+#ifdef OLD_FREEBSD_ABI_LABEL
+ /* The ABI label supported by FreeBSD <= 4.0 is quite nonstandard. */
+ memcpy (&i_ehdrp->e_ident[EI_ABIVERSION], "FreeBSD", 8);
+#endif
+ return true;
+}
+#undef elf_backend_init_file_header
+#define elf_backend_init_file_header elf64_sw_64_fbsd_init_file_header
+
+#undef elf64_bed
+#define elf64_bed elf64_sw_64_fbsd_bed
+
+#include "elf64-target.h"
diff --git a/bfd/hosts/sw_64linux.h b/bfd/hosts/sw_64linux.h
new file mode 100644
index 00000000..e5dfe2a1
--- /dev/null
+++ b/bfd/hosts/sw_64linux.h
@@ -0,0 +1,25 @@
+/* Copyright (C) 2007-2023 Free Software Foundation, Inc.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* Linux dumps "struct task_struct" at the end of the core-file. This
+ structure is currently 1080 bytes long, but we allow up to 4096
+ bytes to allow for some future growth. */
+#define TRAD_CORE_EXTRA_SIZE_ALLOWED 4096
+#define TRAD_UNIX_CORE_FILE_FAILING_SIGNAL(abfd) \
+ ((abfd)->tdata.trad_core_data->u.signal)
diff --git a/bfd/libbfd-in.h b/bfd/libbfd-in.h
index 03ae099c..a72fedb0 100644
--- a/bfd/libbfd-in.h
+++ b/bfd/libbfd-in.h
@@ -413,8 +413,12 @@ extern bfd_cleanup _bfd_vms_lib_alpha_archive_p
(bfd *) ATTRIBUTE_HIDDEN;
extern bfd_cleanup _bfd_vms_lib_ia64_archive_p
(bfd *) ATTRIBUTE_HIDDEN;
+extern bfd_cleanup _bfd_vms_lib_sw_64_archive_p
+ (bfd *) ATTRIBUTE_HIDDEN;
extern bool _bfd_vms_lib_alpha_mkarchive
(bfd *) ATTRIBUTE_HIDDEN;
+extern bool _bfd_vms_lib_sw_64_mkarchive
+ (bfd *) ATTRIBUTE_HIDDEN;
extern bool _bfd_vms_lib_ia64_mkarchive
(bfd *) ATTRIBUTE_HIDDEN;
diff --git a/bfd/libbfd.h b/bfd/libbfd.h
index d4fb3107..40ac4a78 100644
--- a/bfd/libbfd.h
+++ b/bfd/libbfd.h
@@ -1241,6 +1241,36 @@ static const char *const bfd_reloc_code_real_names[] = { "@@uninitialized@@",
"BFD_RELOC_ALPHA_TPREL_HI16",
"BFD_RELOC_ALPHA_TPREL_LO16",
"BFD_RELOC_ALPHA_TPREL16",
+#ifdef TARGET_SW_64
+ "BFD_RELOC_SW_64_GPDISP_HI16",
+ "BFD_RELOC_SW_64_GPDISP_LO16",
+ "BFD_RELOC_SW_64_GPDISP",
+ "BFD_RELOC_SW_64_LITERAL",
+ "BFD_RELOC_SW_64_ELF_LITERAL",
+ "BFD_RELOC_SW_64_LITUSE",
+ "BFD_RELOC_SW_64_HINT",
+ "BFD_RELOC_SW_64_LINKAGE",
+ "BFD_RELOC_SW_64_CODEADDR",
+ "BFD_RELOC_SW_64_GPREL_HI16",
+ "BFD_RELOC_SW_64_GPREL_LO16",
+  "BFD_RELOC_SW_64_BRSGP",
+ "BFD_RELOC_SW_64_NOP",
+ "BFD_RELOC_SW_64_BSR",
+ "BFD_RELOC_SW_64_LDA",
+ "BFD_RELOC_SW_64_BOH",
+ "BFD_RELOC_SW_64_TLSGD",
+ "BFD_RELOC_SW_64_TLSLDM",
+ "BFD_RELOC_SW_64_DTPMOD64",
+ "BFD_RELOC_SW_64_GOTDTPREL16",
+ "BFD_RELOC_SW_64_DTPREL64",
+ "BFD_RELOC_SW_64_DTPREL_HI16",
+ "BFD_RELOC_SW_64_DTPREL_LO16",
+ "BFD_RELOC_SW_64_DTPREL16",
+ "BFD_RELOC_SW_64_GOTTPREL16",
+ "BFD_RELOC_SW_64_TPREL64",
+ "BFD_RELOC_SW_64_TLSREL_GOT",
+ "BFD_RELOC_SW_64_ELF_LITERAL_GOT",
+#endif
"BFD_RELOC_MIPS_JMP",
"BFD_RELOC_MICROMIPS_JMP",
"BFD_RELOC_MIPS16_JMP",
diff --git a/bfd/peicode.h b/bfd/peicode.h
index e2e2be65..7f5f196d 100644
--- a/bfd/peicode.h
+++ b/bfd/peicode.h
@@ -1203,6 +1203,10 @@ pe_ILF_object_p (bfd * abfd)
case IMAGE_FILE_MACHINE_UNKNOWN:
case IMAGE_FILE_MACHINE_ALPHA:
case IMAGE_FILE_MACHINE_ALPHA64:
+#ifdef TARGET_SW_64
+ case IMAGE_FILE_MACHINE_SW_64:
+ case IMAGE_FILE_MACHINE_SW_6464:
+#endif
case IMAGE_FILE_MACHINE_IA64:
break;
diff --git a/bfd/reloc.c b/bfd/reloc.c
index fbc67ac7..bb961186 100644
--- a/bfd/reloc.c
+++ b/bfd/reloc.c
@@ -1546,6 +1546,8 @@ ENUMX
BFD_RELOC_16_PCREL_S2
ENUMX
BFD_RELOC_23_PCREL_S2
+ENUMX
+ BFD_RELOC_SW_64_BR26
ENUMDOC
These PC-relative relocations are stored as word displacements --
i.e., byte displacements shifted right two bits. The 30-bit word
diff --git a/bfd/targets.c b/bfd/targets.c
index 3dbcd088..02ca240d 100644
--- a/bfd/targets.c
+++ b/bfd/targets.c
@@ -690,6 +690,14 @@ extern const bfd_target alpha_elf64_vec;
extern const bfd_target alpha_elf64_fbsd_vec;
extern const bfd_target alpha_vms_vec;
extern const bfd_target alpha_vms_lib_txt_vec;
+#ifdef TARGET_SW_64
+extern const bfd_target sw_64_ecoff_le_vec;
+extern const bfd_target sw_64_elf64_vec;
+extern const bfd_target sw_64_elf64_fbsd_vec;
+extern const bfd_target sw_64_nlm32_vec;
+extern const bfd_target sw_64_vms_vec;
+extern const bfd_target sw_64_vms_lib_txt_vec;
+#endif
extern const bfd_target am33_elf32_linux_vec;
extern const bfd_target amdgcn_elf64_le_vec;
extern const bfd_target aout_vec;
@@ -1011,6 +1019,15 @@ static const bfd_target * const _bfd_target_vector[] =
#endif
&alpha_vms_lib_txt_vec,
+#if defined (BFD64) && defined (TARGET_SW_64)
+  &sw_64_ecoff_le_vec,
+  &sw_64_elf64_vec,
+  &sw_64_elf64_fbsd_vec,
+  &sw_64_nlm32_vec,
+  &sw_64_vms_vec,
+  &sw_64_vms_lib_txt_vec,
+#endif
+
&am33_elf32_linux_vec,
#if 0
diff --git a/binutils/config.in b/binutils/config.in
index 91fe00af..2c53f57d 100644
--- a/binutils/config.in
+++ b/binutils/config.in
@@ -262,3 +262,5 @@
/* Define to 1 if you need to in order for `stat' and other things to work. */
#undef _POSIX_SOURCE
+
+#undef TARGET_SW_64
diff --git a/binutils/configure b/binutils/configure
index e0bf7f1e..664dbbeb 100755
--- a/binutils/configure
+++ b/binutils/configure
@@ -14675,6 +14675,11 @@ do
mep-*)
OBJDUMP_DEFS="-DSKIP_ZEROES=256 -DSKIP_ZEROES_AT_END=0"
;;
+ sw_64-*-*)
+cat >>confdefs.h <<_ACEOF
+#define TARGET_SW_64
+_ACEOF
+ ;;
esac
# Add objdump private vectors.
diff --git a/binutils/readelf.c b/binutils/readelf.c
index 97d72d0b..f8be1eba 100644
--- a/binutils/readelf.c
+++ b/binutils/readelf.c
@@ -96,6 +96,9 @@
#include "elf/aarch64.h"
#include "elf/alpha.h"
+#ifdef TARGET_SW_64
+#include "elf/sw_64.h"
+#endif
#include "elf/amdgpu.h"
#include "elf/arc.h"
#include "elf/arm.h"
@@ -1057,6 +1060,9 @@ guess_is_rela (unsigned int e_machine)
case EM_AARCH64:
case EM_ADAPTEVA_EPIPHANY:
case EM_ALPHA:
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+#endif
case EM_ALTERA_NIOS2:
case EM_ARC:
case EM_ARC_COMPACT:
@@ -1731,6 +1737,12 @@ dump_relocations (Filedata *filedata,
rtype = elf_alpha_reloc_type (type);
break;
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ rtype = elf_sw_64_reloc_type (type);
+ break;
+#endif
+
case EM_ARM:
rtype = elf_arm_reloc_type (type);
break;
@@ -1950,6 +1962,47 @@ dump_relocations (Filedata *filedata,
res = false;
}
}
+#ifdef TARGET_SW_64
+ else if (filedata->file_header.e_machine == EM_SW_64 && rtype != NULL
+ && streq (rtype, "R_SW_64_LITUSE"))
+ {
+ switch (rels[i].r_addend)
+ {
+ case LITUSE_SW_64_ADDR:
+ rtype = "ADDR";
+ break;
+ case LITUSE_SW_64_BASE:
+ rtype = "BASE";
+ break;
+ case LITUSE_SW_64_BYTOFF:
+ rtype = "BYTOFF";
+ break;
+ case LITUSE_SW_64_JSR:
+ rtype = "JSR";
+ break;
+ case LITUSE_SW_64_TLSGD:
+ rtype = "TLSGD";
+ break;
+ case LITUSE_SW_64_TLSLDM:
+ rtype = "TLSLDM";
+ break;
+ case LITUSE_SW_64_JSRDIRECT:
+ rtype = "JSRDIRECT";
+ break;
+ default:
+ rtype = NULL;
+ }
+ if (rtype)
+ printf (" (%s)", rtype);
+ else
+ {
+ putchar (' ');
+ printf (_ ("<unknown addend: %lx>"),
+ (unsigned long) rels[i].r_addend);
+ res = false;
+ }
+ }
+#endif
else if (symtab_index)
{
if (symtab == NULL || symtab_index >= nsyms)
@@ -2355,6 +2408,20 @@ get_alpha_dynamic_type (unsigned long type)
}
}
+#ifdef TARGET_SW_64
+static const char *
+get_sw_64_dynamic_type (unsigned long type)
+{
+ switch (type)
+ {
+ case DT_SW_64_PLTRO:
+ return "SW_64_PLTRO";
+ default:
+ return NULL;
+ }
+}
+#endif
+
static const char *
get_score_dynamic_type (unsigned long type)
{
@@ -2559,6 +2626,11 @@ get_dynamic_type (Filedata * filedata, unsigned long type)
case EM_ALPHA:
result = get_alpha_dynamic_type (type);
break;
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ result = get_sw_64_dynamic_type (type);
+ break;
+#endif
case EM_SCORE:
result = get_score_dynamic_type (type);
break;
@@ -2979,6 +3051,10 @@ get_machine_name (unsigned e_machine)
/* Large numbers... */
case EM_MT: return "Morpho Techologies MT processor";
case EM_ALPHA: return "Alpha";
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ return "Sw_64";
+#endif
case EM_WEBASSEMBLY: return "Web Assembly";
case EM_DLX: return "OpenDLX";
case EM_XSTORMY16: return "Sanyo XStormy16 CPU core";
@@ -11092,6 +11168,7 @@ get_num_dynamic_syms (Filedata * filedata)
unsigned int hash_ent_size = 4;
if ((filedata->file_header.e_machine == EM_ALPHA
+ || filedata->file_header.e_machine == EM_SW_64
|| filedata->file_header.e_machine == EM_S390
|| filedata->file_header.e_machine == EM_S390_OLD)
&& filedata->file_header.e_ident[EI_CLASS] == ELFCLASS64)
@@ -12791,6 +12868,23 @@ get_alpha_symbol_other (unsigned int other)
}
}
+#ifdef TARGET_SW_64
+static const char *
+get_sw_64_symbol_other (unsigned int other)
+{
+ switch (other)
+ {
+ case STO_SW_64_NOPV:
+ return "NOPV";
+ case STO_SW_64_STD_GPLOAD:
+ return "STD GPLOAD";
+ default:
+ error (_ ("Unrecognized sw_64 specific other value: %u"), other);
+ return _ ("<unknown>");
+ }
+}
+#endif
+
static const char *
get_solaris_symbol_visibility (unsigned int visibility)
{
@@ -12955,6 +13049,11 @@ get_symbol_other (Filedata * filedata, unsigned int other)
case EM_ALPHA:
result = get_alpha_symbol_other (other);
break;
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ result = get_sw_64_symbol_other (other);
+ break;
+#endif
case EM_AARCH64:
result = get_aarch64_symbol_other (other);
break;
@@ -14320,6 +14419,10 @@ is_32bit_abs_reloc (Filedata * filedata, unsigned int reloc_type)
return reloc_type == 3;
case EM_ALPHA:
return reloc_type == 1; /* R_ALPHA_REFLONG. */
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ return reloc_type == 1; /* R_SW_64_REFLONG. */
+#endif
case EM_ARC:
return reloc_type == 1; /* R_ARC_32. */
case EM_ARC_COMPACT:
@@ -14516,6 +14619,10 @@ is_32bit_pcrel_reloc (Filedata * filedata, unsigned int reloc_type)
return reloc_type == 6;
case EM_ALPHA:
return reloc_type == 10; /* R_ALPHA_SREL32. */
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ return reloc_type == 10; /* R_SW_64_SREL32. */
+#endif
case EM_ARC_COMPACT:
case EM_ARC_COMPACT2:
return reloc_type == 49; /* R_ARC_32_PCREL. */
@@ -14586,6 +14693,10 @@ is_64bit_abs_reloc (Filedata * filedata, unsigned int reloc_type)
return reloc_type == 257; /* R_AARCH64_ABS64. */
case EM_ALPHA:
return reloc_type == 2; /* R_ALPHA_REFQUAD. */
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ return reloc_type == 2; /* R_SW_64_REFQUAD. */
+#endif
case EM_IA_64:
return (reloc_type == 0x26 /* R_IA64_DIR64MSB. */
|| reloc_type == 0x27 /* R_IA64_DIR64LSB. */);
@@ -14630,6 +14741,10 @@ is_64bit_pcrel_reloc (Filedata * filedata, unsigned int reloc_type)
return reloc_type == 260; /* R_AARCH64_PREL64. */
case EM_ALPHA:
return reloc_type == 11; /* R_ALPHA_SREL64. */
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+ return reloc_type == 11; /* R_SW_64_SREL64. */
+#endif
case EM_IA_64:
return (reloc_type == 0x4e /* R_IA64_PCREL64MSB. */
|| reloc_type == 0x4f /* R_IA64_PCREL64LSB. */);
@@ -14965,6 +15080,9 @@ is_none_reloc (Filedata * filedata, unsigned int reloc_type)
case EM_68K: /* R_68K_NONE. */
case EM_ADAPTEVA_EPIPHANY:
case EM_ALPHA: /* R_ALPHA_NONE. */
+#ifdef TARGET_SW_64
+ case EM_SW_64: /* R_SW_64_NONE. */
+#endif
case EM_ALTERA_NIOS2: /* R_NIOS2_NONE. */
case EM_ARC: /* R_ARC_NONE. */
case EM_ARC_COMPACT2: /* R_ARC_NONE. */
@@ -20755,6 +20873,9 @@ get_netbsd_elfcore_note_type (Filedata * filedata, unsigned e_type)
case EM_OLD_ALPHA:
case EM_ALPHA:
+#ifdef TARGET_SW_64
+ case EM_SW_64:
+#endif
case EM_SPARC:
case EM_SPARC32PLUS:
case EM_SPARCV9:
diff --git a/binutils/testsuite/binutils-all/nm.exp b/binutils/testsuite/binutils-all/nm.exp
index 91b519d9..a5bccc7b 100644
--- a/binutils/testsuite/binutils-all/nm.exp
+++ b/binutils/testsuite/binutils-all/nm.exp
@@ -44,7 +44,7 @@ if {![binutils_assemble $srcdir/$subdir/bintest.s tmpdir/bintest.o]} then {
# This test does not work correctly on ECOFF targets, because ECOFF
# stores most symbols twice, which messes up the nm output.
- setup_xfail "alpha*-*-*ecoff" "alpha*-*-osf*"
+ setup_xfail "alpha*-*-*ecoff" "alpha*-*-osf*" "sw_64*-*-*ecoff" "sw_64*-*-osf*"
# This test does not work correctly on XCOFF targets, because XCOFF
# does not enter static symbols in the symbol table.
@@ -102,7 +102,7 @@ if {![binutils_assemble $srcdir/$subdir/bintest.s tmpdir/bintest.o]} then {
# This test does not work correctly on ECOFF targets, because ECOFF
# stores most symbols twice, which messes up the nm output.
- setup_xfail "alpha*-*-*ecoff" "alpha*-*-osf*"
+ setup_xfail "alpha*-*-*ecoff" "alpha*-*-osf*" "sw_64*-*-*ecoff" "sw_64*-*-osf*"
# This test does not work correctly on XCOFF targets, because XCOFF
# does not enter static symbols in the symbol table.
@@ -251,7 +251,7 @@ if [is_elf_format] {
setup_xfail "sh*-*-*"
# The pre-compiled dwarf info in dw4.s is not compatible with the
# ALPHA, HPPA, IA64 and MIPS targets.
- setup_xfail "alpha*-*-*" "hppa*-*-*" "ia64*-*-*" "mips*-*-*"
+ setup_xfail "alpha*-*-*" "sw_64*-*-*" "hppa*-*-*" "ia64*-*-*" "mips*-*-*"
# Assembling the source file triggers an ICE in the FT32 assembler.
# FIXME: Fix the ICE...
setup_xfail "ft32-*-*"
@@ -291,7 +291,7 @@ if [is_elf_format] {
# Test nm --ifunc-chars on a indirect symbols.
# The following targets are known to not support ifuncs.
- setup_xfail "alpha*-*-*"
+ setup_xfail "alpha*-*-*" "sw_64*-*-*"
setup_xfail "arm*-elf" "arm*-*-nto*" "arm*-*-netbsd*"
setup_xfail "*-*-hpux*"
setup_xfail "mips*-*-*" "tx39*-*-*"
diff --git a/config.guess b/config.guess
index 354a8ccd..24e06352 100755
--- a/config.guess
+++ b/config.guess
@@ -369,6 +369,36 @@ case $UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION in
OSF_REL=`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
GUESS=$UNAME_MACHINE-dec-osf$OSF_REL
;;
+ sw_64:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ SW_64_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The sw_64 \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$SW_64_CPU_TYPE" in
+ "SW6A (21264)")
+ UNAME_MACHINE="sw_64sw6a" ;;
+ "SW6B (21264)")
+ UNAME_MACHINE="sw_64sw6b" ;;
+ "SW6 (21264A)")
+ UNAME_MACHINE="sw_64sw6" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ OSF_REL=`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ GUESS=$UNAME_MACHINE-dec-osf$OSF_REL
+ ;;
Amiga*:UNIX_System_V:4.0:*)
GUESS=m68k-unknown-sysv4
;;
@@ -996,6 +1026,17 @@ EOF
if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
;;
+ sw_64:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null` in
+ SW6A) UNAME_MACHINE=sw_64sw6a ;;
+ SW6B) UNAME_MACHINE=sw_64sw6b ;;
+ SW8A) UNAME_MACHINE=sw_64sw8a ;;
+ SW6) UNAME_MACHINE=sw_64sw6 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+ GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}
+ ;;
arc:Linux:*:* | arceb:Linux:*:* | arc32:Linux:*:* | arc64:Linux:*:*)
GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
;;
diff --git a/config.sub b/config.sub
index f6ede1d0..72927971 100755
--- a/config.sub
+++ b/config.sub
@@ -1185,6 +1185,7 @@ case $cpu-$vendor in
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \
| alphapca5[67] | alpha64pca5[67] \
+ | sw_64 | sw_64sw6a | sw_64sw6b | sw_64sw8a \
| am33_2.0 \
| amdgcn \
| arc | arceb | arc32 | arc64 \
diff --git a/config/elf.m4 b/config/elf.m4
index 1772a443..537373c3 100644
--- a/config/elf.m4
+++ b/config/elf.m4
@@ -17,7 +17,7 @@ target_elf=no
case $target in
*-darwin* | *-aix* | *-cygwin* | *-mingw* | *-aout* | *-*coff* | \
*-msdosdjgpp* | *-vms* | *-wince* | *-*-pe* | \
- alpha*-dec-osf* | *-interix* | hppa[[12]]*-*-hpux* | \
+ alpha*-dec-osf* | sw_64*-dec-osf* | *-interix* | hppa[[12]]*-*-hpux* | \
nvptx-*-none)
target_elf=no
;;
diff --git a/config/intdiv0.m4 b/config/intdiv0.m4
index 55dddcf1..75de6873 100644
--- a/config/intdiv0.m4
+++ b/config/intdiv0.m4
@@ -56,7 +56,7 @@ int main ()
[
# Guess based on the CPU.
case "$host_cpu" in
- alpha* | i[34567]86 | m68k | s390*)
+ alpha* | sw_64* | i[34567]86 | m68k | s390*)
gt_cv_int_divbyzero_sigfpe="guessing yes";;
*)
gt_cv_int_divbyzero_sigfpe="guessing no";;
diff --git a/config/picflag.m4 b/config/picflag.m4
index 3f3ac744..3cd3c8b3 100644
--- a/config/picflag.m4
+++ b/config/picflag.m4
@@ -20,6 +20,9 @@ case "${$2}" in
alpha*-dec-osf5*)
# PIC is the default.
;;
+ sw_64*-dec-osf5*)
+ # PIC is the default.
+ ;;
hppa*64*-*-hpux*)
# PIC is the default for 64-bit PA HP-UX.
;;
diff --git a/config/tcl.m4 b/config/tcl.m4
index 4542a4b2..e33d6280 100644
--- a/config/tcl.m4
+++ b/config/tcl.m4
@@ -1368,6 +1368,9 @@ dnl AC_CHECK_TOOL(AR, ar)
if test "`uname -m`" = "alpha" ; then
CFLAGS="$CFLAGS -mieee"
fi
+ if test "`uname -m`" = "sw_64" ; then
+ CFLAGS="$CFLAGS -mieee"
+ fi
if test $do64bit = yes; then
AC_CACHE_CHECK([if compiler accepts -m64 flag], tcl_cv_cc_m64, [
hold_cflags=$CFLAGS
@@ -1415,8 +1418,7 @@ dnl AC_CHECK_TOOL(AR, ar)
CC_SEARCH_FLAGS=""
LD_SEARCH_FLAGS=""])
fi
- if test "`uname -m`" = "alpha" ; then
- CFLAGS="$CFLAGS -mieee"
+ if test "`uname -m`" = "alpha" ; then CFLAGS="$CFLAGS -mieee"
fi
;;
Lynx*)
diff --git a/configure b/configure
index 9cb953a1..cb344197 100755
--- a/configure
+++ b/configure
@@ -792,6 +792,7 @@ ac_subst_files='serialization_dependencies
host_makefile_frag
target_makefile_frag
alphaieee_frag
+sw_64ieee_frag
ospace_frag'
ac_user_opts='
enable_option_checking
@@ -3736,6 +3737,9 @@ case "${target}" in
alpha*-dec-osf*)
noconfigdirs="$noconfigdirs target-newlib target-libgloss"
;;
+ sw_64*-dec-osf*)
+ noconfigdirs="$noconfigdirs target-newlib target-libgloss"
+ ;;
i[3456789]86-*-linux*)
# This section makes it possible to build newlib natively on linux.
# If we are using a cross compiler then don't configure newlib.
@@ -3877,6 +3881,15 @@ case "${target}" in
bpf-*-*)
noconfigdirs="$noconfigdirs target-libobjc target-libbacktrace"
;;
+ sw_64*-dec-osf*)
+ # ld works, but does not support shared libraries.
+ # gas doesn't generate exception information.
+ noconfigdirs="$noconfigdirs gas ld"
+ ;;
+ sw_64*-*-*)
+ # newlib is not 64 bit ready
+ noconfigdirs="$noconfigdirs target-newlib target-libgloss"
+ ;;
sh*-*-pe|mips*-*-pe|*arm-wince-pe)
noconfigdirs="$noconfigdirs tcl tk itcl libgui sim"
;;
@@ -4145,6 +4158,9 @@ fi
alpha*-linux*)
host_makefile_frag="config/mh-alpha-linux"
;;
+ sw_64*-linux*)
+ host_makefile_frag="config/mh-sw_64-linux"
+ ;;
hppa*-hp-hpux10*)
host_makefile_frag="config/mh-pa-hpux10"
;;
@@ -8703,7 +8719,7 @@ target_elf=no
case $target in
*-darwin* | *-aix* | *-cygwin* | *-mingw* | *-aout* | *-*coff* | \
*-msdosdjgpp* | *-vms* | *-wince* | *-*-pe* | \
- alpha*-dec-osf* | *-interix* | hppa[12]*-*-hpux* | \
+ alpha*-dec-osf* | sw_64*-dec-osf* | *-interix* | hppa[12]*-*-hpux* | \
nvptx-*-none)
target_elf=no
;;
@@ -9686,6 +9702,15 @@ case $target in
;;
esac
+sw_64ieee_frag=/dev/null
+case $target in
+ sw_64*-*-*)
+ # This just makes sure to use the -mieee option to build target libs.
+ # This should probably be set individually by each library.
+ sw_64ieee_frag="config/mt-sw_64ieee"
+ ;;
+esac
+
# If --enable-target-optspace always use -Os instead of -O2 to build
# the target libraries, similarly if it is not specified, use -Os
# on selected platforms.
@@ -10418,7 +10443,7 @@ case "${target}" in
esac
# Makefile fragments.
-for frag in host_makefile_frag target_makefile_frag alphaieee_frag ospace_frag;
+for frag in host_makefile_frag target_makefile_frag alphaieee_frag sw_64ieee_frag ospace_frag;
do
eval fragval=\$$frag
if test $fragval != /dev/null; then
diff --git a/configure.ac b/configure.ac
index 2f740926..d0ebb2da 100644
--- a/configure.ac
+++ b/configure.ac
@@ -969,6 +969,9 @@ case "${target}" in
alpha*-dec-osf*)
noconfigdirs="$noconfigdirs target-newlib target-libgloss"
;;
+ sw_64*-dec-osf*)
+ noconfigdirs="$noconfigdirs target-newlib target-libgloss"
+ ;;
i[[3456789]]86-*-linux*)
# This section makes it possible to build newlib natively on linux.
# If we are using a cross compiler then don't configure newlib.
@@ -1110,6 +1113,15 @@ case "${target}" in
bpf-*-*)
noconfigdirs="$noconfigdirs target-libobjc target-libbacktrace"
;;
+ sw_64*-dec-osf*)
+ # ld works, but does not support shared libraries.
+ # gas doesn't generate exception information.
+ noconfigdirs="$noconfigdirs gas ld"
+ ;;
+ sw_64*-*-*)
+ # newlib is not 64 bit ready
+ noconfigdirs="$noconfigdirs target-newlib target-libgloss"
+ ;;
sh*-*-pe|mips*-*-pe|*arm-wince-pe)
noconfigdirs="$noconfigdirs tcl tk itcl libgui sim"
;;
@@ -1359,6 +1371,9 @@ case "${host}" in
alpha*-linux*)
host_makefile_frag="config/mh-alpha-linux"
;;
+ sw_64*-linux*)
+ host_makefile_frag="config/mh-sw_64-linux"
+ ;;
hppa*-hp-hpux10*)
host_makefile_frag="config/mh-pa-hpux10"
;;
@@ -2824,6 +2839,15 @@ case $target in
;;
esac
+sw_64ieee_frag=/dev/null
+case $target in
+ sw_64*-*-*)
+ # This just makes sure to use the -mieee option to build target libs.
+ # This should probably be set individually by each library.
+ sw_64ieee_frag="config/mt-sw_64ieee"
+ ;;
+esac
+
# If --enable-target-optspace always use -Os instead of -O2 to build
# the target libraries, similarly if it is not specified, use -Os
# on selected platforms.
@@ -3551,7 +3575,7 @@ case "${target}" in
esac
# Makefile fragments.
-for frag in host_makefile_frag target_makefile_frag alphaieee_frag ospace_frag;
+for frag in host_makefile_frag target_makefile_frag alphaieee_frag sw_64ieee_frag ospace_frag;
do
eval fragval=\$$frag
if test $fragval != /dev/null; then
@@ -3561,6 +3585,7 @@ done
AC_SUBST_FILE(host_makefile_frag)
AC_SUBST_FILE(target_makefile_frag)
AC_SUBST_FILE(alphaieee_frag)
+AC_SUBST_FILE(sw_64ieee_frag)
AC_SUBST_FILE(ospace_frag)
# Miscellanea: directories, flags, etc.
diff --git a/gas/Makefile.am b/gas/Makefile.am
index f8770e83..1f6a38fa 100644
--- a/gas/Makefile.am
+++ b/gas/Makefile.am
@@ -138,6 +138,7 @@ HFILES = \
TARGET_CPU_CFILES = \
config/tc-aarch64.c \
config/tc-alpha.c \
+ config/tc-sw_64.c \
config/tc-arc.c \
config/tc-arm.c \
config/tc-avr.c \
@@ -213,6 +214,7 @@ TARGET_CPU_CFILES = \
TARGET_CPU_HFILES = \
config/tc-aarch64.h \
config/tc-alpha.h \
+ config/tc-sw_64.h \
config/tc-arc.h \
config/tc-arm.h \
config/tc-avr.h \
diff --git a/gas/Makefile.in b/gas/Makefile.in
index 427f42df..78248678 100644
--- a/gas/Makefile.in
+++ b/gas/Makefile.in
@@ -625,6 +625,7 @@ HFILES = \
TARGET_CPU_CFILES = \
config/tc-aarch64.c \
config/tc-alpha.c \
+ config/tc-sw_64.c \
config/tc-arc.c \
config/tc-arm.c \
config/tc-avr.c \
@@ -700,6 +701,7 @@ TARGET_CPU_CFILES = \
TARGET_CPU_HFILES = \
config/tc-aarch64.h \
config/tc-alpha.h \
+ config/tc-sw_64.h \
config/tc-arc.h \
config/tc-arm.h \
config/tc-avr.h \
@@ -1084,6 +1086,8 @@ config/tc-aarch64.$(OBJEXT): config/$(am__dirstamp) \
config/$(DEPDIR)/$(am__dirstamp)
config/tc-alpha.$(OBJEXT): config/$(am__dirstamp) \
config/$(DEPDIR)/$(am__dirstamp)
+config/tc-sw_64.$(OBJEXT): config/$(am__dirstamp) \
+ config/$(DEPDIR)/$(am__dirstamp)
config/tc-arc.$(OBJEXT): config/$(am__dirstamp) \
config/$(DEPDIR)/$(am__dirstamp)
config/tc-arm.$(OBJEXT): config/$(am__dirstamp) \
@@ -1360,6 +1364,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/rx-parse.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/tc-aarch64.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/tc-alpha.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/tc-sw_64.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/tc-arc.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/tc-arm.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@config/$(DEPDIR)/tc-avr.Po@am__quote@
diff --git a/gas/as.c b/gas/as.c
index 6839c841..844b85fe 100644
--- a/gas/as.c
+++ b/gas/as.c
@@ -57,6 +57,9 @@
extern void gas_cgen_begin (void);
#endif
+expressionS toksave[5];
+int nop_quantity = 0;
+
/* We build a list of defsyms as we read the options, and then define
them after we have initialized everything. */
struct defsym_list
@@ -511,6 +514,7 @@ parse_args (int * pargc, char *** pargv)
OPTION_NOCOMPRESS_DEBUG,
OPTION_NO_PAD_SECTIONS,
OPTION_MULTIBYTE_HANDLING, /* = STD_BASE + 40 */
+ OPTION_ORG_BACKWARDS,
OPTION_SFRAME
/* When you add options here, check that they do
not collide with OPTION_MD_BASE. See as.h. */
@@ -594,6 +598,7 @@ parse_args (int * pargc, char *** pargv)
,{"traditional-format", no_argument, NULL, OPTION_TRADITIONAL_FORMAT}
,{"warn", no_argument, NULL, OPTION_WARN}
,{"multibyte-handling", required_argument, NULL, OPTION_MULTIBYTE_HANDLING}
+ ,{"pal", no_argument, NULL, OPTION_ORG_BACKWARDS}
};
/* Construct the option lists from the standard list and the target
@@ -696,6 +701,12 @@ parse_args (int * pargc, char *** pargv)
flag_traditional_format = 1;
break;
+#ifdef TARGET_SW_64
+ case OPTION_ORG_BACKWARDS:
+      pal_org_backwrards = 1;
+ break;
+#endif
+
case OPTION_MULTIBYTE_HANDLING:
if (strcmp (optarg, "allow") == 0)
multibyte_handling = multibyte_allow;
diff --git a/gas/as.h b/gas/as.h
index 99ffe77a..aa11891b 100644
--- a/gas/as.h
+++ b/gas/as.h
@@ -361,6 +361,11 @@ COMMON int linkrelax;
COMMON int do_not_pad_sections_to_alignment;
+#ifdef TARGET_SW_64
+/* Support -pal for hmcode. */
+COMMON int pal_org_backwrards;
+#endif
+
enum multibyte_input_handling
{
multibyte_allow = 0,
diff --git a/gas/config.in b/gas/config.in
index 232bc350..878e1b5b 100644
--- a/gas/config.in
+++ b/gas/config.in
@@ -331,3 +331,11 @@
/* Define to 1 if you need to in order for `stat' and other things to work. */
#undef _POSIX_SOURCE
+
+/* Default CPU for SW_64 targets. */
+#undef SW_CPU_STRING_DEFAULT
+
+/* Define Git version for ELF flags. */
+#undef GIT_REVISION
+
+#undef TARGET_SW_64
diff --git a/gas/config/obj-ecoff.c b/gas/config/obj-ecoff.c
index 26da2af0..1409727b 100644
--- a/gas/config/obj-ecoff.c
+++ b/gas/config/obj-ecoff.c
@@ -128,6 +128,16 @@ ecoff_frob_file_before_fix (void)
fprmask = alpha_fprmask;
#endif
+#ifdef TC_SW_64
+ sw_64_frob_ecoff_data ();
+
+ if (!bfd_ecoff_set_gp_value (stdoutput, sw_64_gp_value))
+ as_fatal (_ ("Can't set GP value"));
+
+ gprmask = sw_64_gprmask;
+ fprmask = sw_64_fprmask;
+#endif
+
if (! bfd_ecoff_set_regmasks (stdoutput, gprmask, fprmask, cprmask))
as_fatal (_("Can't set register masks"));
}
diff --git a/gas/config/obj-elf.c b/gas/config/obj-elf.c
index 753a929f..d59612a8 100644
--- a/gas/config/obj-elf.c
+++ b/gas/config/obj-elf.c
@@ -40,6 +40,12 @@
#include "elf/alpha.h"
#endif
+#ifdef TARGET_SW_64
+#ifdef TC_SW_64
+#include "elf/sw_64.h"
+#endif
+#endif
+
#ifdef TC_MIPS
#include "elf/mips.h"
#endif
diff --git a/gas/config/obj-elf.h b/gas/config/obj-elf.h
index e8fc3126..e09a6760 100644
--- a/gas/config/obj-elf.h
+++ b/gas/config/obj-elf.h
@@ -42,6 +42,11 @@
extern int alpha_flag_mdebug;
#endif
+#ifdef TC_SW_64
+#define ECOFF_DEBUGGING (sw_64_flag_mdebug > 0)
+extern int sw_64_flag_mdebug;
+#endif
+
/* For now, always set ECOFF_DEBUGGING for a MIPS target. */
#ifdef TC_MIPS
#define ECOFF_DEBUGGING mips_flag_mdebug
diff --git a/gas/config/tc-sw_64.c b/gas/config/tc-sw_64.c
new file mode 100644
index 00000000..1893ec4d
--- /dev/null
+++ b/gas/config/tc-sw_64.c
@@ -0,0 +1,7142 @@
+/* tc-sw_64.c - Processor-specific code for the Sw_64 CPU.
+ Copyright (C) 1989-2023 Free Software Foundation, Inc.
+ Contributed by Carnegie Mellon University, 1993.
+ Written by Alessandro Forin, based on earlier gas-1.38 target CPU files.
+ Modified by Ken Raeburn for gas-2.x and ECOFF support.
+ Modified by Richard Henderson for ELF support.
+ Modified by Klaus K"ampf for EVAX (OpenVMS/Sw_64) support.
+
+ This file is part of GAS, the GNU Assembler.
+
+ GAS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GAS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GAS; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+/* Mach Operating System
+ Copyright (c) 1993 Carnegie Mellon University
+ All Rights Reserved.
+
+ Permission to use, copy, modify and distribute this software and its
+ documentation is hereby granted, provided that both the copyright
+ notice and this permission notice appear in all copies of the
+ software, derivative works or modified versions, and any portions
+ thereof, and that both notices appear in supporting documentation.
+
+ CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+ CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+
+ Carnegie Mellon requests users of this software to return to
+
+ Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ School of Computer Science
+ Carnegie Mellon University
+ Pittsburgh PA 15213-3890
+
+ any improvements or extensions that they make and grant Carnegie the
+ rights to redistribute these changes. */
+
+#include "as.h"
+#include "subsegs.h"
+#include "ecoff.h"
+
+#include "opcode/sw_64.h"
+
+#ifdef OBJ_ELF
+#include "elf/sw_64.h"
+#endif
+
+#ifdef OBJ_EVAX
+#include "vms.h"
+#include "vms/egps.h"
+#endif
+
+#include "dwarf2dbg.h"
+#include "dw2gencfi.h"
+#include "safe-ctype.h"
+
+/* Local types. */
+
+#define TOKENIZE_ERROR -1
+#define TOKENIZE_ERROR_REPORT -2
+#define MAX_INSN_FIXUPS 2
+#define MAX_INSN_ARGS 5
+
+/* Used since new relocation types are introduced in this
+ file (DUMMY_RELOC_LITUSE_*) */
+typedef int extended_bfd_reloc_code_real_type;
+
+struct sw_64_fixup
+{
+ expressionS exp;
+ /* bfd_reloc_code_real_type reloc; */
+ extended_bfd_reloc_code_real_type reloc;
+#ifdef OBJ_EVAX
+ /* The symbol of the item in the linkage section. */
+ symbolS *xtrasym;
+
+ /* The symbol of the procedure descriptor. */
+ symbolS *procsym;
+#endif
+};
+
+struct sw_64_insn
+{
+ unsigned insn;
+ int nfixups;
+ struct sw_64_fixup fixups[MAX_INSN_FIXUPS];
+ long sequence;
+};
+
+enum sw_64_macro_arg
+{
+ MACRO_EOA = 1,
+ MACRO_IR,
+ MACRO_PIR,
+ MACRO_OPIR,
+ MACRO_CPIR,
+ MACRO_FPR,
+ MACRO_EXP
+};
+
+struct sw_64_macro
+{
+ const char *name;
+ void (*emit) (const expressionS *, int, const void *);
+ const void *arg;
+ enum sw_64_macro_arg argsets[16];
+};
+
+/* Extra expression types. */
+
+#define O_pregister O_md1 /* O_register, in parentheses. */
+#define O_cpregister O_md2 /* + a leading comma. */
+
+/* The sw_64_reloc_op table below depends on the ordering of these. */
+#define O_literal O_md3 /* !literal relocation. */
+#define O_lituse_addr O_md4 /* !lituse_addr relocation. */
+#define O_lituse_base O_md5 /* !lituse_base relocation. */
+#define O_lituse_bytoff O_md6 /* !lituse_bytoff relocation. */
+#define O_lituse_jsr O_md7 /* !lituse_jsr relocation. */
+#define O_lituse_tlsgd O_md8 /* !lituse_tlsgd relocation. */
+#define O_lituse_tlsldm O_md9 /* !lituse_tlsldm relocation. */
+#define O_lituse_jsrdirect O_md10 /* !lituse_jsrdirect relocation. */
+#define O_gpdisp O_md11 /* !gpdisp relocation. */
+#define O_gprelhigh O_md12 /* !gprelhigh relocation. */
+#define O_gprellow O_md13 /* !gprellow relocation. */
+#define O_gprel O_md14 /* !gprel relocation. */
+#define O_samegp O_md15 /* !samegp relocation. */
+#define O_tlsgd O_md16 /* !tlsgd relocation. */
+#define O_tlsldm O_md17 /* !tlsldm relocation. */
+#define O_gotdtprel O_md18 /* !gotdtprel relocation. */
+#define O_dtprelhi O_md19 /* !dtprelhi relocation. */
+#define O_dtprello O_md20 /* !dtprello relocation. */
+#define O_dtprel O_md21 /* !dtprel relocation. */
+#define O_gottprel O_md22 /* !gottprel relocation. */
+#define O_tprelhi O_md23 /* !tprelhi relocation. */
+#define O_tprello O_md24 /* !tprello relocation. */
+#define O_tprel O_md25 /* !tprel relocation. */
+#define O_tlsrel_got O_md26 /* !tlsrel_got relocation. */
+#define O_literal_got O_md27 /* !literal_got relocation. */
+
+#define DUMMY_RELOC_LITUSE_ADDR (BFD_RELOC_UNUSED + 1)
+#define DUMMY_RELOC_LITUSE_BASE (BFD_RELOC_UNUSED + 2)
+#define DUMMY_RELOC_LITUSE_BYTOFF (BFD_RELOC_UNUSED + 3)
+#define DUMMY_RELOC_LITUSE_JSR (BFD_RELOC_UNUSED + 4)
+#define DUMMY_RELOC_LITUSE_TLSGD (BFD_RELOC_UNUSED + 5)
+#define DUMMY_RELOC_LITUSE_TLSLDM (BFD_RELOC_UNUSED + 6)
+#define DUMMY_RELOC_LITUSE_JSRDIRECT (BFD_RELOC_UNUSED + 7)
+
+#define USER_RELOC_P(R) ((R) >= O_literal && (R) <= O_literal_got)
+/* Macros for extracting the type and number of encoded register tokens. */
+
+#define is_ir_num(x) (((x) & 32) == 0)
+#define is_fpr_num(x) (((x) & 32) != 0)
+#define regno(x) ((x) & 31)
+
+/* Something odd inherited from the old assembler. */
+
+#define note_gpreg(R) (sw_64_gprmask |= (1 << (R)))
+#define note_fpreg(R) (sw_64_fprmask |= (1 << (R)))
+
+/* Predicates for 16- and 32-bit ranges. */
+/* XXX: The non-shift version appears to trigger a compiler bug when
+ cross-assembling from x86 w/ gcc 2.7.2. */
+
+#if 1
+#define range_signed_16(x) \
+ (((offsetT) (x) >> 15) == 0 || ((offsetT) (x) >> 15) == -1)
+#define range_signed_32(x) \
+ (((offsetT) (x) >> 31) == 0 || ((offsetT) (x) >> 31) == -1)
+#else
+#define range_signed_16(x) \
+ ((offsetT) (x) >= -(offsetT) 0x8000 && (offsetT) (x) <= (offsetT) 0x7FFF)
+#define range_signed_32(x) \
+ ((offsetT) (x) >= -(offsetT) 0x80000000 \
+ && (offsetT) (x) <= (offsetT) 0x7FFFFFFF)
+#endif
+
+/* Macros for sign extending from 16- and 32-bits. */
+/* XXX: The cast macros will work on all the systems that I care about,
+ but really a predicate should be found to use the non-cast forms. */
+
+#define sign_extend_16(x) ((short) (x))
+#define sign_extend_32(x) ((int) (x))
+
+/* Macros to build tokens. */
+
+#define set_tok_reg(t, r) \
+ (memset (&(t), 0, sizeof (t)), (t).X_op = O_register, (t).X_add_number = (r))
+#define set_tok_preg(t, r) \
+ (memset (&(t), 0, sizeof (t)), (t).X_op = O_pregister, (t).X_add_number = (r))
+#define set_tok_cpreg(t, r) \
+ (memset (&(t), 0, sizeof (t)), (t).X_op = O_cpregister, \
+ (t).X_add_number = (r))
+#define set_tok_freg(t, r) \
+ (memset (&(t), 0, sizeof (t)), (t).X_op = O_register, \
+ (t).X_add_number = (r) + 32)
+#define set_tok_sym(t, s, a) \
+ (memset (&(t), 0, sizeof (t)), (t).X_op = O_symbol, (t).X_add_symbol = (s), \
+ (t).X_add_number = (a))
+#define set_tok_const(t, n) \
+ (memset (&(t), 0, sizeof (t)), (t).X_op = O_constant, (t).X_add_number = (n))
+
+/* Generic assembler global variables which must be defined by all
+ targets. */
+
+/* Characters which always start a comment. */
+const char comment_chars[] = "#";
+
+/* Characters which start a comment at the beginning of a line. */
+const char line_comment_chars[] = "#";
+
+/* Characters which may be used to separate multiple commands on a
+ single line. */
+const char line_separator_chars[] = ";";
+
+/* Characters which are used to indicate an exponent in a floating
+ point number. */
+const char EXP_CHARS[] = "eE";
+
+/* Characters which mean that a number is a floating point constant,
+ as in 0d1.0. */
+/* XXX: Do all of these really get used on the sw_64?? */
+const char FLT_CHARS[] = "rRsSfFdDxXpP";
+
+extern expressionS toksave[MAX_INSN_ARGS];
+extern int nop_quantity;
+static int sw_64_branch_separate = 0;
+
+/* The argument of the -march= flag. The architecture we are assembling. */
+static int file_sw_64_arch = CPU_UNKNOWN;
+static const char *sw_64_arch_string;
+
+extern fixS *frags_pre_fixup;
+
+struct sw_64_cpu_info
+{
+ const char *name; /* CPU or ISA name. */
+ int flags; /* SW_64_CPU_* flags. */
+ int ase; /* Set of ASEs implemented by the CPU. */
+ int isa; /* ISA level. */
+ int cpu; /* CPU number (default CPU if ISA). */
+};
+
+#define SW_64_CPU_IS_ISA 0x0001 /* Is this an ISA? (If 0, a CPU.) */
+
+struct sw_64_set_options
+{
+ int isa;
+ int ase;
+ int sw_64_tmp;
+ int micromips;
+ int noreorder;
+ unsigned int at;
+ int warn_about_macros;
+ int nomove;
+ int nobopt;
+ int noautoextend;
+ bfd_boolean insn32;
+ int gp32;
+ int fp32;
+ int arch;
+ bfd_boolean sym32;
+ bfd_boolean soft_float;
+ bfd_boolean single_float;
+};
+
+static struct sw_64_set_options sw_64_opts = {
+ /* isa */ ISA_UNKNOWN,
+ /* ase */ 0,
+ /* sw_64_tmp */ -1,
+ /* micromips */ -1,
+ /* noreorder */ 0,
+ /* at */ FALSE,
+ /* warn_about_macros */ 0,
+ /* nomove */ 0,
+ /* nobopt */ 0,
+ /* noautoextend */ 0,
+ /* insn32 */ FALSE,
+ /* gp32 */ 0,
+ /* fp32 */ 0,
+ /* arch */ CPU_UNKNOWN,
+ /* sym32 */ FALSE,
+ /* soft_float */ FALSE,
+ /* single_float */ FALSE};
+
+#ifdef OBJ_EVAX
+const char *md_shortopts = "Fm:g+1h:HG:";
+#else
+const char *md_shortopts = "Fm:gG:";
+#endif
+
+struct option md_longopts[] = {
+#define OPTION_GPREL16 (OPTION_MD_BASE)
+ {"gprel16", no_argument, NULL, OPTION_GPREL16},
+#define OPTION_LITERALGOT (OPTION_GPREL16 + 1)
+ {"literalgot", no_argument, NULL, OPTION_LITERALGOT},
+#define OPTION_TLSRELGOT_GOTTPREL (OPTION_LITERALGOT + 1)
+ {"tlsrelgot_gottprel", no_argument, NULL, OPTION_TLSRELGOT_GOTTPREL},
+#define OPTION_TLSRELGOT_GOTDTPREL (OPTION_TLSRELGOT_GOTTPREL + 1)
+ {"tlsrelgot_gotdtprel", no_argument, NULL, OPTION_TLSRELGOT_GOTDTPREL},
+#define OPTION_TLSRELGOT_TLSLDM (OPTION_TLSRELGOT_GOTDTPREL + 1)
+ {"tlsrelgot_tlsldm", no_argument, NULL, OPTION_TLSRELGOT_TLSLDM},
+#define OPTION_TLSRELGOT_TLSGD (OPTION_TLSRELGOT_TLSLDM + 1)
+ {"tlsrelgot_tlsgd", no_argument, NULL, OPTION_TLSRELGOT_TLSGD},
+#define OPTION_32ADDR (OPTION_TLSRELGOT_TLSGD + 1)
+ {"32addr", no_argument, NULL, OPTION_32ADDR},
+#define OPTION_NOCHECK_SAMEREG (OPTION_32ADDR + 1)
+ {"nocheck-samereg", no_argument, NULL, OPTION_NOCHECK_SAMEREG},
+#define OPTION_RELAX (OPTION_NOCHECK_SAMEREG + 1)
+ {"relax", no_argument, NULL, OPTION_RELAX},
+#ifdef OBJ_ELF
+#define OPTION_MDEBUG (OPTION_RELAX + 1)
+#define OPTION_NO_MDEBUG (OPTION_MDEBUG + 1)
+ {"mdebug", no_argument, NULL, OPTION_MDEBUG},
+ {"no-mdebug", no_argument, NULL, OPTION_NO_MDEBUG},
+#endif
+#ifdef OBJ_EVAX
+#define OPTION_REPLACE (OPTION_RELAX + 1)
+#define OPTION_NOREPLACE (OPTION_REPLACE + 1)
+ {"replace", no_argument, NULL, OPTION_REPLACE},
+ {"noreplace", no_argument, NULL, OPTION_NOREPLACE},
+#endif
+#define OPTION_BRANCH_SEPARATE (OPTION_RELAX + 3)
+#define OPTION_NOBRANCH_SEPARATE (OPTION_RELAX + 4)
+ {"branch-separate", no_argument, NULL, OPTION_BRANCH_SEPARATE},
+ {"nobranch-separate", no_argument, NULL, OPTION_NOBRANCH_SEPARATE},
+ {NULL, no_argument, NULL, 0}};
+
+size_t md_longopts_size = sizeof (md_longopts);
+
+#ifdef OBJ_EVAX
+#define AXP_REG_R0 0
+#define AXP_REG_R16 16
+#define AXP_REG_R17 17
+#undef AXP_REG_T9
+#define AXP_REG_T9 22
+#undef AXP_REG_T10
+#define AXP_REG_T10 23
+#undef AXP_REG_T11
+#define AXP_REG_T11 24
+#undef AXP_REG_T12
+#define AXP_REG_T12 25
+#define AXP_REG_AI 25
+#undef AXP_REG_FP
+#define AXP_REG_FP 29
+
+#undef AXP_REG_GP
+#define AXP_REG_GP AXP_REG_PV
+
+#endif /* OBJ_EVAX */
+
+/* The cpu for which we are generating code. */
+static unsigned sw_64_target;
+static const char *sw_64_target_name;
+
+/* The hash table of instruction opcodes. */
+static htab_t sw_64_opcode_hash;
+
+/* The hash table of macro opcodes. */
+static htab_t sw_64_macro_hash;
+
+#ifdef OBJ_ECOFF
+/* The $gp relocation symbol. */
+static symbolS *sw_64_gp_symbol;
+
+/* XXX: what is this, and why is it exported? */
+valueT sw_64_gp_value;
+#endif
+
+/* The current $gp register. */
+static int sw_64_gp_register = AXP_REG_GP;
+
+/* A table of the register symbols. */
+static symbolS *sw_64_register_table[64];
+
+/* Constant sections, or sections of constants. */
+#ifdef OBJ_ECOFF
+static segT sw_64_lita_section;
+#endif
+#ifdef OBJ_EVAX
+segT sw_64_link_section;
+#endif
+#ifndef OBJ_EVAX
+static segT sw_64_lit8_section;
+#endif
+
+/* Symbols referring to said sections. */
+#ifdef OBJ_ECOFF
+static symbolS *sw_64_lita_symbol;
+#endif
+#ifdef OBJ_EVAX
+static symbolS *sw_64_link_symbol;
+#endif
+#ifndef OBJ_EVAX
+static symbolS *sw_64_lit8_symbol;
+#endif
+
+/* Literal for .litX+0x8000 within .lita. */
+#ifdef OBJ_ECOFF
+static offsetT sw_64_lit8_literal;
+#endif
+
+/* Is the assembler not allowed to use $at? */
+static int sw_64_noat_on = 0;
+
+/* Are macros enabled? */
+static int sw_64_macros_on = 1;
+
+/* Are floats disabled? */
+static int sw_64_nofloats_on = 0;
+
+/* Are addresses 32 bit? */
+static int sw_64_addr32_on = 0;
+
+/* Insert ldih instruction with tlsrel_got relocation before ldi instruction
+ * with tlsgd relocation. */
+static int sw_64_tlsrelgot_tlsgd_on = 0;
+
+/* Insert ldih instruction with tlsrel_got relocation before ldi instruction
+ * with tlsldm relocation. */
+static int sw_64_tlsrelgot_tlsldm_on = 0;
+
+/* Insert ldih instruction with literal_got relocation before ldl instruction
+ * with literal relocation. */
+static int sw_64_literalgot_on = 1;
+
+/* Change gprel16 relocation to gprelhi+gprello relocation with ldih insn and
+ * ldi/ldw/flds/fldd insn. */
+static int sw_64_gprel16_on = 0;
+
+/* Insert ldih instruction with tlsrel_got relocation before ldl instruction
+ * with gottprel relocation. */
+static int sw_64_tlsrelgot_gottprel_on = 0;
+
+/* Insert ldih instruction with tlsrel_got relocation before ldl instruction
+ * with gotdtprel relocation. */
+static int sw_64_tlsrelgot_gotdtprel_on = 0;
+
+/* Symbol labelling the current insn. When the Sw_64 gas sees
+ foo:
+ .quad 0
+ and the section happens to not be on an eight byte boundary, it
+ will align both the symbol and the .quad to an eight byte boundary. */
+static symbolS *sw_64_insn_label;
+#if defined (OBJ_ELF) || defined (OBJ_EVAX)
+static symbolS *sw_64_prologue_label;
+#endif
+
+#ifdef OBJ_EVAX
+/* Symbol associate with the current call instruction. */
+static symbolS *sw_64_linkage_symbol;
+#endif
+
+/* Whether we should automatically align data generation pseudo-ops.
+ .align 0 will turn this off. */
+static int sw_64_auto_align_on = 1;
+
+/* The known current alignment of the current section. */
+static int sw_64_current_align;
+
+/* These are exported to ECOFF code. */
+unsigned long sw_64_gprmask, sw_64_fprmask;
+
+/* Whether the debugging option was seen. */
+static int sw_64_debug;
+
+#ifdef OBJ_ELF
+/* Whether we are emitting an mdebug section. */
+int sw_64_flag_mdebug = -1;
+#endif
+
+#ifdef OBJ_EVAX
+/* Whether to perform the VMS procedure call optimization. */
+int sw_64_flag_replace = 1;
+#endif
+
+/* Don't fully resolve relocations, allowing code movement in the linker. */
+static int sw_64_flag_relax;
+
+/* What value to give to bfd_set_gp_size. */
+static int g_switch_value = 8;
+
+static int sw_64_flag_nocheck_samereg = 0;
+
+#ifdef OBJ_EVAX
+/* Collect information about current procedure here. */
+struct sw_64_evax_procs
+{
+ symbolS *symbol; /* Proc pdesc symbol. */
+ int pdsckind;
+ int framereg; /* Register for frame pointer. */
+ int framesize; /* Size of frame. */
+ int rsa_offset;
+ int ra_save;
+ int fp_save;
+ long imask;
+ long fmask;
+ int type;
+ int prologue;
+ symbolS *handler;
+ int handler_data;
+};
+
+/* Linked list of .linkage fixups. */
+struct sw_64_linkage_fixups *sw_64_linkage_fixup_root;
+static struct sw_64_linkage_fixups *sw_64_linkage_fixup_tail;
+
+/* Current procedure descriptor. */
+static struct sw_64_evax_procs *sw_64_evax_proc;
+static struct sw_64_evax_procs sw_64_evax_proc_data;
+
+static int sw_64_flag_hash_long_names = 0; /* -+ */
+static int sw_64_flag_show_after_trunc = 0; /* -H */
+
+/* If the -+ switch is given, then a hash is appended to any name that is
+ longer than 64 characters, else longer symbol names are truncated. */
+
+#endif
+
+#ifdef RELOC_OP_P
+/* A table to map the spelling of a relocation operand into an appropriate
+ bfd_reloc_code_real_type type. The table is assumed to be ordered such
+ that op-O_literal indexes into it. */
+
+#define SW_64_RELOC_TABLE(op) \
+ (&sw_64_reloc_op[((!USER_RELOC_P (op)) ? (abort (), 0) \
+ : (int) (op) - (int) O_literal)])
+
+#define DEF(NAME, RELOC, REQ, ALLOW) \
+ { \
+#NAME, sizeof (#NAME) - 1, O_##NAME, RELOC, REQ, ALLOW \
+ }
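+
+/* For instance, DEF (literal, BFD_RELOC_SW_64_ELF_LITERAL, 0, 1) below
+   expands to { "literal", 7, O_literal, BFD_RELOC_SW_64_ELF_LITERAL, 0, 1 },
+   i.e. name, name length, operator, reloc, require_seq, allow_seq.  */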
+
+static const struct sw_64_reloc_op_tag
+{
+ const char *name; /* String to lookup. */
+ size_t length; /* Size of the string. */
+ operatorT op; /* Which operator to use. */
+ extended_bfd_reloc_code_real_type reloc;
+ unsigned int require_seq : 1; /* Require a sequence number. */
+ unsigned int allow_seq : 1; /* Allow a sequence number. */
+} sw_64_reloc_op[] = {
+ DEF (literal, BFD_RELOC_SW_64_ELF_LITERAL, 0, 1),
+ DEF (lituse_addr, DUMMY_RELOC_LITUSE_ADDR, 1, 1),
+ DEF (lituse_base, DUMMY_RELOC_LITUSE_BASE, 1, 1),
+ DEF (lituse_bytoff, DUMMY_RELOC_LITUSE_BYTOFF, 1, 1),
+ DEF (lituse_jsr, DUMMY_RELOC_LITUSE_JSR, 1, 1),
+ DEF (lituse_tlsgd, DUMMY_RELOC_LITUSE_TLSGD, 1, 1),
+ DEF (lituse_tlsldm, DUMMY_RELOC_LITUSE_TLSLDM, 1, 1),
+ DEF (lituse_jsrdirect, DUMMY_RELOC_LITUSE_JSRDIRECT, 1, 1),
+ DEF (gpdisp, BFD_RELOC_SW_64_GPDISP, 1, 1),
+ DEF (gprelhigh, BFD_RELOC_SW_64_GPREL_HI16, 0, 0),
+ DEF (gprellow, BFD_RELOC_SW_64_GPREL_LO16, 0, 0),
+ DEF (gprel, BFD_RELOC_GPREL16, 0, 0),
+ DEF (samegp, BFD_RELOC_SW_64_BRSGP, 0, 0),
+ DEF (tlsgd, BFD_RELOC_SW_64_TLSGD, 0, 1),
+ DEF (tlsldm, BFD_RELOC_SW_64_TLSLDM, 0, 1),
+ DEF (gotdtprel, BFD_RELOC_SW_64_GOTDTPREL16, 0, 1),
+ DEF (dtprelhi, BFD_RELOC_SW_64_DTPREL_HI16, 0, 0),
+ DEF (dtprello, BFD_RELOC_SW_64_DTPREL_LO16, 0, 0),
+ DEF (dtprel, BFD_RELOC_SW_64_DTPREL16, 0, 0),
+ DEF (gottprel, BFD_RELOC_SW_64_GOTTPREL16, 0, 1),
+ DEF (tprelhi, BFD_RELOC_SW_64_TPREL_HI16, 0, 0),
+ DEF (tprello, BFD_RELOC_SW_64_TPREL_LO16, 0, 0),
+ DEF (tprel, BFD_RELOC_SW_64_TPREL16, 0, 0),
+ DEF (tlsrel_got, BFD_RELOC_SW_64_TLSREL_GOT, 0, 1),
+ DEF (literal_got, BFD_RELOC_SW_64_ELF_LITERAL_GOT, 0, 1),
+};
+
+#undef DEF
+
+static const int sw_64_num_reloc_op
+ = sizeof (sw_64_reloc_op) / sizeof (*sw_64_reloc_op);
+#endif /* RELOC_OP_P */
+
+/* Maximum # digits needed to hold the largest sequence #. */
+#define SW_64_RELOC_DIGITS 25
+
+/* Structure to hold explicit sequence information. */
+struct sw_64_reloc_tag
+{
+ fixS *master; /* The literal reloc. */
+ fixS *tlsrel_got; /* The tlsrel_got reloc. */
+ fixS *tlsgd; /* The tlsgd reloc. */
+ fixS *tlsldm; /* The tlsldm reloc. */
+ fixS *gotdtprel; /* The gotdtprel reloc. */
+ fixS *gottprel; /* The gottprel reloc. */
+ fixS *literal_got; /* The literal_got reloc. */
+
+#ifdef OBJ_EVAX
+ struct symbol *sym; /* Linkage section item symbol. */
+ struct symbol *psym; /* Pdesc symbol. */
+#endif
+ fixS *slaves; /* Head of linked list of lituses. */
+ segT segment; /* Segment relocs are in or undefined_section. */
+ long sequence; /* Sequence #. */
+ unsigned n_master; /* # of literals. */
+ unsigned n_slaves; /* # of lituses. */
+ unsigned saw_tlsgd : 1; /* True if ... */
+ unsigned saw_tlsldm : 1;
+ unsigned saw_lu_tlsgd : 1;
+ unsigned saw_lu_tlsldm : 1;
+ unsigned multi_section_p : 1; /* True if more than one section was used. */
+ char string[1]; /* Printable form of sequence to hash with. */
+};
+
+/* Hash table to link up literals with the appropriate lituse. */
+static htab_t sw_64_literal_hash;
+
+/* Sequence numbers for internal use by macros. */
+static long next_sequence_num = -1;
+
+/* A table of CPU names and opcode sets. */
+
+static const struct cpu_type
+{
+ const char *name;
+ unsigned flags;
+} cpu_types[] = {{"sw6a", AXP_OPCODE_SW6 | AXP_OPCODE_SW6A},
+ {"sw6b", AXP_OPCODE_SW6 | AXP_OPCODE_SW6B},
+ {"sw8a", AXP_OPCODE_SW6 | AXP_OPCODE_SW8A},
+ {"sw4e", AXP_OPCODE_SW6 | AXP_OPCODE_SW4E},
+
+ {0, 0}};
+
+/* Some instruction sets indexed by lg (size). */
+static const char *const sextX_op[] = {"sextb", "sexth", "sextw", NULL};
+static const char *const insXl_op[] = {"ins0b", "ins1b", "ins2b", "ins3b"};
+static const char *const insXh_op[] = {NULL, "ins5b", "ins6b", "ins7b"};
+static const char *const extXl_op[] = {"ext0b", "ext1b", "ext2b", "ext3b"};
+static const char *const extXh_op[] = {NULL, "ext5b", "ext6b", "ext7b"};
+static const char *const mskXl_op[] = {"mask0b", "mask1b", "mask2b", "mask3b"};
+static const char *const mskXh_op[] = {NULL, "mask5b", "mask6b", "mask7b"};
+static const char *const stX_op[] = {"stb", "stb", "stw", "stl"};
+static const char *const ldXu_op[] = {"ldbu", "ldhu", NULL, NULL};
+
+static void
+assemble_insn (const struct sw_64_opcode *, const expressionS *, int,
+ struct sw_64_insn *, extended_bfd_reloc_code_real_type);
+static void
+emit_insn (struct sw_64_insn *);
+static void
+assemble_tokens (const char *, expressionS *, int, int);
+#ifdef OBJ_EVAX
+static const char *
+s_sw_64_section_name (void);
+static symbolS *
+add_to_link_pool (symbolS *, offsetT);
+#endif
+
+static struct sw_64_reloc_tag *
+get_sw_64_reloc_tag (long sequence)
+{
+ char buffer[SW_64_RELOC_DIGITS];
+ struct sw_64_reloc_tag *info;
+
+ sprintf (buffer, "!%ld", sequence);
+
+ info = (struct sw_64_reloc_tag *) str_hash_find (sw_64_literal_hash, buffer);
+ if (!info)
+ {
+ size_t len = strlen (buffer);
+
+ info = notes_calloc (sizeof (struct sw_64_reloc_tag) + len, 1);
+
+ info->segment = now_seg;
+ info->sequence = sequence;
+ strcpy (info->string, buffer);
+ str_hash_insert (sw_64_literal_hash, info->string, info, 0);
+
+#ifdef OBJ_EVAX
+ info->sym = 0;
+ info->psym = 0;
+#endif
+ }
+
+ return info;
+}
+
+#ifndef OBJ_EVAX
+static void
+sw_64_adjust_relocs (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
+ void *ptr ATTRIBUTE_UNUSED)
+{
+ segment_info_type *seginfo = seg_info (sec);
+ fixS **prevP;
+ fixS *fixp;
+ fixS *next;
+ fixS *slave;
+
+ /* If seginfo is NULL, we did not create this section; don't do
+ anything with it. By using a pointer to a pointer, we can update
+ the links in place. */
+ if (seginfo == NULL)
+ return;
+
+ /* If there are no relocations, skip the section. */
+ if (!seginfo->fix_root)
+ return;
+
+ /* First rebuild the fixup chain without the explicit lituse and
+ gpdisp_lo16 relocs. */
+ prevP = &seginfo->fix_root;
+ for (fixp = seginfo->fix_root; fixp; fixp = next)
+ {
+ next = fixp->fx_next;
+ fixp->fx_next = (fixS *) 0;
+
+ switch (fixp->fx_r_type)
+ {
+ case BFD_RELOC_SW_64_LITUSE:
+ if (fixp->tc_fix_data.info->n_master == 0)
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("No !literal!%ld was found"),
+ fixp->tc_fix_data.info->sequence);
+#ifdef RELOC_OP_P
+ if (fixp->fx_offset == LITUSE_SW_64_TLSGD)
+ {
+ if (!fixp->tc_fix_data.info->saw_tlsgd)
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("No !tlsgd!%ld was found"),
+ fixp->tc_fix_data.info->sequence);
+ }
+ else if (fixp->fx_offset == LITUSE_SW_64_TLSLDM)
+ {
+ if (!fixp->tc_fix_data.info->saw_tlsldm)
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("No !tlsldm!%ld was found"),
+ fixp->tc_fix_data.info->sequence);
+ }
+#endif
+ break;
+
+ case BFD_RELOC_SW_64_GPDISP_LO16:
+ if (fixp->tc_fix_data.info->n_master == 0)
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("No ldih !gpdisp!%ld was found"),
+ fixp->tc_fix_data.info->sequence);
+ break;
+
+ case BFD_RELOC_SW_64_ELF_LITERAL_GOT:
+ if (fixp->tc_fix_data.info)
+ break;
+ *prevP = fixp;
+ prevP = &fixp->fx_next;
+ break;
+
+ case BFD_RELOC_SW_64_ELF_LITERAL:
+ if (fixp->tc_fix_data.info
+ && (fixp->tc_fix_data.info->saw_tlsgd
+ || fixp->tc_fix_data.info->saw_tlsldm))
+ break;
+ *prevP = fixp;
+ prevP = &fixp->fx_next;
+ break;
+
+ case BFD_RELOC_SW_64_TLSREL_GOT:
+ if (fixp->tc_fix_data.info)
+ break;
+ *prevP = fixp;
+ prevP = &fixp->fx_next;
+ break;
+
+ default:
+ *prevP = fixp;
+ prevP = &fixp->fx_next;
+ break;
+ }
+ }
+
+ /* Go back and re-chain dependent relocations. They are currently
+ linked through the next_reloc field in reverse order, so as we
+ go through the next_reloc chain, we effectively reverse the chain
+ once again.
+
+ Except if there is more than one !literal for a given sequence
+ number. In that case, the programmer and/or compiler is not sure
+ how control flows from literal to lituse, and we can't be sure to
+ get the relaxation correct.
+
+ ??? Well, actually we could, if there are enough lituses such that
+ we can make each literal have at least one of each lituse type
+ present. Not implemented.
+
+ Also suppress the optimization if the !literals/!lituses are spread
+ in different segments. This can happen with "interesting" uses of
+ inline assembly; examples are present in the Linux kernel semaphores. */
+
+ fixS *fix_prev = NULL;
+ fixS **orig_prevp_root = &seginfo->fix_root;
+ for (fixp = seginfo->fix_root; fixp; fixp = next)
+ {
+ fixS *fix_master = NULL;
+ fixS *fix_literal_got = NULL;
+ fixS *fix_tlsrel_got = NULL;
+ fixS *fix_tlsldm = NULL;
+ fixS *fix_tlsgd = NULL;
+ fixS *fix_tprel = NULL;
+ fixS *fix_gotdtprel = NULL;
+ fixS *fix_gottprel = NULL;
+
+ next = fixp->fx_next;
+ switch (fixp->fx_r_type)
+ {
+ case BFD_RELOC_SW_64_TLSGD:
+ case BFD_RELOC_SW_64_TLSLDM:
+ if (!fixp->tc_fix_data.info)
+ {
+ fix_prev = fixp;
+ break;
+ }
+ if (fixp->tc_fix_data.info->n_master == 0)
+ {
+ fix_prev = fixp;
+ break;
+ }
+ else if (fixp->tc_fix_data.info->n_master > 1)
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("too many !literal!%ld for %s"),
+ fixp->tc_fix_data.info->sequence,
+ (fixp->fx_r_type == BFD_RELOC_SW_64_TLSGD
+ ? "!tlsgd"
+ : "!tlsldm"));
+ break;
+ }
+ {
+ fix_tlsrel_got = fixp->tc_fix_data.info->tlsrel_got;
+ fix_tlsldm = fixp->tc_fix_data.info->tlsldm;
+ fix_tlsgd = fixp->tc_fix_data.info->tlsgd;
+ fix_tprel = NULL;
+
+ if (fixp->tc_fix_data.info->saw_tlsgd && fix_tlsgd
+ && (fixp == fix_tlsgd))
+ {
+ if (fix_tlsrel_got)
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fix_tlsrel_got;
+ fix_tlsrel_got->fx_next = fix_tlsgd;
+ if (fixp == *orig_prevp_root)
+ *orig_prevp_root = fix_tlsrel_got;
+ }
+ else
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fixp;
+ }
+ fix_tprel = fix_tlsgd;
+ }
+
+ if (fixp->tc_fix_data.info->saw_tlsldm && fix_tlsldm
+ && (fixp == fix_tlsldm))
+ {
+ if (fix_tlsrel_got)
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fix_tlsrel_got;
+ fix_tlsrel_got->fx_next = fix_tlsldm;
+ if (fixp == *orig_prevp_root)
+ *orig_prevp_root = fix_tlsrel_got;
+ }
+ else
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fixp;
+ }
+ fix_tprel = fix_tlsldm;
+ }
+
+ fixp->tc_fix_data.info->master->fx_next = fixp->fx_next;
+ fixp->fx_next = fixp->tc_fix_data.info->master;
+ fix_prev = fixp;
+ fixp = fixp->fx_next;
+ }
+ /* Fall through. */
+
+ case BFD_RELOC_SW_64_ELF_LITERAL:
+ if (fixp->tc_fix_data.info && fixp->tc_fix_data.info->n_master == 1
+ && !fixp->tc_fix_data.info->multi_section_p)
+ {
+ fix_master = fixp->tc_fix_data.info->master;
+ fix_literal_got = fixp->tc_fix_data.info->literal_got;
+ if (fix_master != fixp)
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("error !literal!%ld"),
+ fixp->tc_fix_data.info->sequence);
+ break;
+ }
+ if (fix_literal_got)
+ {
+ fix_literal_got->fx_next = fixp;
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fix_literal_got;
+ if (fixp == *orig_prevp_root)
+ *orig_prevp_root = fix_literal_got;
+ }
+ else
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fixp;
+ }
+
+ fix_prev = fixp;
+ if (fixp->tc_fix_data.info->slaves != ((fixS *) 0))
+ fix_prev = fixp->tc_fix_data.info->slaves;
+ for (slave = fixp->tc_fix_data.info->slaves; slave != (fixS *) 0;
+ slave = slave->tc_fix_data.next_reloc)
+ {
+ slave->fx_next = fixp->fx_next;
+ fixp->fx_next = slave;
+ }
+ }
+ else
+ {
+ fix_prev = fixp;
+ break;
+ }
+ break;
+
+ case BFD_RELOC_SW_64_GPDISP_HI16:
+ if (fixp->tc_fix_data.info->n_slaves == 0)
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("No ldi !gpdisp!%ld was found"),
+ fixp->tc_fix_data.info->sequence);
+ else
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fixp;
+ slave = fixp->tc_fix_data.info->slaves;
+ slave->fx_next = next;
+ fixp->fx_next = slave;
+ fix_prev = slave;
+ }
+ break;
+
+ case BFD_RELOC_SW_64_GOTDTPREL16:
+ if (!fixp->tc_fix_data.info)
+ {
+ fix_prev = fixp;
+ break;
+ }
+ if (fixp->tc_fix_data.info->n_master == 0)
+ {
+ fix_prev = fixp;
+ break;
+ }
+ else if (fixp->tc_fix_data.info->n_master > 1)
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("too many !gotdtprel!%ld"),
+ fixp->tc_fix_data.info->sequence);
+ break;
+ }
+
+ {
+ fix_tlsrel_got = fixp->tc_fix_data.info->tlsrel_got;
+ fix_gotdtprel = fixp->tc_fix_data.info->gotdtprel;
+ if ((fixp != fix_gotdtprel)
+ || (fixp != fixp->tc_fix_data.info->master))
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("bad reloc_tag of !gotdtprel!%ld"),
+ fixp->tc_fix_data.info->sequence);
+ break;
+ }
+ if (fix_tlsrel_got)
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fix_tlsrel_got;
+ fix_tlsrel_got->fx_next = fix_gotdtprel;
+ if (fixp == *orig_prevp_root)
+ *orig_prevp_root = fix_tlsrel_got;
+ }
+ else
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fixp;
+ }
+ fix_prev = fixp;
+ }
+ break;
+ case BFD_RELOC_SW_64_GOTTPREL16:
+ if (!fixp->tc_fix_data.info)
+ {
+ fix_prev = fixp;
+ break;
+ }
+ if (fixp->tc_fix_data.info->n_master == 0)
+ {
+ fix_prev = fixp;
+ break;
+ }
+ else if (fixp->tc_fix_data.info->n_master > 1)
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("too many !gottprel!%ld"),
+ fixp->tc_fix_data.info->sequence);
+ break;
+ }
+
+ {
+ fix_tlsrel_got = fixp->tc_fix_data.info->tlsrel_got;
+ fix_gottprel = fixp->tc_fix_data.info->gottprel;
+ if ((fixp != fix_gottprel)
+ || (fixp != fixp->tc_fix_data.info->master))
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("bad reloc_tag of !gottprel!%ld"),
+ fixp->tc_fix_data.info->sequence);
+ break;
+ }
+ if (fix_tlsrel_got)
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fix_tlsrel_got;
+ fix_tlsrel_got->fx_next = fix_gottprel;
+ if (fixp == *orig_prevp_root)
+ *orig_prevp_root = fix_tlsrel_got;
+ }
+ else
+ {
+ if (fix_prev != NULL)
+ fix_prev->fx_next = fixp;
+ }
+ fix_prev = fixp;
+ }
+ break;
+
+ default:
+ fix_prev = fixp;
+ break;
+ }
+ }
+}
+/* Before the relocations are written, reorder them, so that user
+ supplied !lituse relocations follow the appropriate !literal
+ relocations, and similarly for !gpdisp relocations. */
+
+void
+sw_64_before_fix (void)
+{
+ if (sw_64_literal_hash)
+ bfd_map_over_sections (stdoutput, sw_64_adjust_relocs, NULL);
+}
+
+#endif
+
+#ifdef DEBUG_SW_64
+static void
+debug_exp (expressionS tok[], int ntok)
+{
+ int i;
+
+ fprintf (stderr, "debug_exp: %d tokens", ntok);
+ for (i = 0; i < ntok; i++)
+ {
+ expressionS *t = &tok[i];
+ const char *name;
+
+ switch (t->X_op)
+ {
+ default:
+ name = "unknown";
+ break;
+ case O_illegal:
+ name = "O_illegal";
+ break;
+ case O_absent:
+ name = "O_absent";
+ break;
+ case O_constant:
+ name = "O_constant";
+ break;
+ case O_symbol:
+ name = "O_symbol";
+ break;
+ case O_symbol_rva:
+ name = "O_symbol_rva";
+ break;
+ case O_register:
+ name = "O_register";
+ break;
+ case O_big:
+ name = "O_big";
+ break;
+ case O_uminus:
+ name = "O_uminus";
+ break;
+ case O_bit_not:
+ name = "O_bit_not";
+ break;
+ case O_logical_not:
+ name = "O_logical_not";
+ break;
+ case O_multiply:
+ name = "O_multiply";
+ break;
+ case O_divide:
+ name = "O_divide";
+ break;
+ case O_modulus:
+ name = "O_modulus";
+ break;
+ case O_left_shift:
+ name = "O_left_shift";
+ break;
+ case O_right_shift:
+ name = "O_right_shift";
+ break;
+ case O_bit_inclusive_or:
+ name = "O_bit_inclusive_or";
+ break;
+ case O_bit_or_not:
+ name = "O_bit_or_not";
+ break;
+ case O_bit_exclusive_or:
+ name = "O_bit_exclusive_or";
+ break;
+ case O_bit_and:
+ name = "O_bit_and";
+ break;
+ case O_add:
+ name = "O_add";
+ break;
+ case O_subtract:
+ name = "O_subtract";
+ break;
+ case O_eq:
+ name = "O_eq";
+ break;
+ case O_ne:
+ name = "O_ne";
+ break;
+ case O_lt:
+ name = "O_lt";
+ break;
+ case O_le:
+ name = "O_le";
+ break;
+ case O_ge:
+ name = "O_ge";
+ break;
+ case O_gt:
+ name = "O_gt";
+ break;
+ case O_logical_and:
+ name = "O_logical_and";
+ break;
+ case O_logical_or:
+ name = "O_logical_or";
+ break;
+ case O_index:
+ name = "O_index";
+ break;
+ case O_pregister:
+ name = "O_pregister";
+ break;
+ case O_cpregister:
+ name = "O_cpregister";
+ break;
+ case O_literal:
+ name = "O_literal";
+ break;
+ case O_lituse_addr:
+ name = "O_lituse_addr";
+ break;
+ case O_lituse_base:
+ name = "O_lituse_base";
+ break;
+ case O_lituse_bytoff:
+ name = "O_lituse_bytoff";
+ break;
+ case O_lituse_jsr:
+ name = "O_lituse_jsr";
+ break;
+ case O_lituse_tlsgd:
+ name = "O_lituse_tlsgd";
+ break;
+ case O_lituse_tlsldm:
+ name = "O_lituse_tlsldm";
+ break;
+ case O_lituse_jsrdirect:
+ name = "O_lituse_jsrdirect";
+ break;
+ case O_gpdisp:
+ name = "O_gpdisp";
+ break;
+ case O_gprelhigh:
+ name = "O_gprelhigh";
+ break;
+ case O_gprellow:
+ name = "O_gprellow";
+ break;
+ case O_gprel:
+ name = "O_gprel";
+ break;
+ case O_samegp:
+ name = "O_samegp";
+ break;
+ case O_tlsgd:
+ name = "O_tlsgd";
+ break;
+ case O_tlsldm:
+ name = "O_tlsldm";
+ break;
+ case O_gotdtprel:
+ name = "O_gotdtprel";
+ break;
+ case O_dtprelhi:
+ name = "O_dtprelhi";
+ break;
+ case O_dtprello:
+ name = "O_dtprello";
+ break;
+ case O_dtprel:
+ name = "O_dtprel";
+ break;
+ case O_gottprel:
+ name = "O_gottprel";
+ break;
+ case O_tprelhi:
+ name = "O_tprelhi";
+ break;
+ case O_tprello:
+ name = "O_tprello";
+ break;
+ case O_tprel:
+ name = "O_tprel";
+ break;
+ }
+
+ fprintf (stderr, ", %s (%s, %s, %d)", name,
+ (t->X_add_symbol) ? S_GET_NAME (t->X_add_symbol) : "--",
+ (t->X_op_symbol) ? S_GET_NAME (t->X_op_symbol) : "--",
+ (int) t->X_add_number);
+ }
+ fprintf (stderr, "\n");
+ fflush (stderr);
+}
+#endif
+
+/* Parse the arguments to an opcode. */
+
+static int
+tokenize_arguments (char *str, expressionS tok[], int ntok)
+{
+ expressionS *end_tok = tok + ntok;
+ char *old_input_line_pointer;
+ int saw_comma = 0, saw_arg = 0;
+#ifdef DEBUG_SW_64
+ expressionS *orig_tok = tok;
+#endif
+#ifdef RELOC_OP_P
+ char *p;
+ const struct sw_64_reloc_op_tag *r;
+ int c, i;
+ size_t len;
+ int reloc_found_p = 0;
+#endif
+
+ memset (tok, 0, sizeof (*tok) * ntok);
+
+ /* Save and restore input_line_pointer around this function. */
+ old_input_line_pointer = input_line_pointer;
+ input_line_pointer = str;
+
+#ifdef RELOC_OP_P
+ /* ??? Wrest control of ! away from the regular expression parser. */
+ is_end_of_line[(unsigned char) '!'] = 1;
+#endif
+
+ while (tok < end_tok && *input_line_pointer)
+ {
+ SKIP_WHITESPACE ();
+ switch (*input_line_pointer)
+ {
+ case '\0':
+ goto fini;
+
+#ifdef RELOC_OP_P
+ case '!':
+ /* A relocation operand can be placed after the normal operand on an
+ assembly language statement, and has the following form:
+ !relocation_type!sequence_number. */
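+	  /* For example (operands are illustrative only):
+	       ldl $1, sym($gp) !literal!3
+	     attaches an explicit !literal relocation with sequence number 3
+	     to the instruction; a later !lituse_base!3 or !lituse_jsr!3
+	     refers back to it.  */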
+ if (reloc_found_p)
+ {
+ /* Only support one relocation op per insn. */
+ as_bad (_ ("More than one relocation op per insn"));
+ goto err_report;
+ }
+
+ if (!saw_arg)
+ goto err;
+
+ ++input_line_pointer;
+ SKIP_WHITESPACE ();
+ c = get_symbol_name (&p);
+
+ /* Parse !relocation_type. */
+ len = input_line_pointer - p;
+ if (len == 0)
+ {
+ as_bad (_ ("No relocation operand"));
+ goto err_report;
+ }
+
+ r = &sw_64_reloc_op[0];
+ for (i = sw_64_num_reloc_op - 1; i >= 0; i--, r++)
+ if (len == r->length && memcmp (p, r->name, len) == 0)
+ break;
+ if (i < 0)
+ {
+ as_bad (_ ("Unknown relocation operand: !%s"), p);
+ goto err_report;
+ }
+
+ *input_line_pointer = c;
+ SKIP_WHITESPACE_AFTER_NAME ();
+ if (*input_line_pointer != '!')
+ {
+ if (r->require_seq)
+ {
+ as_bad (_ ("no sequence number after !%s"), p);
+ goto err_report;
+ }
+
+ tok->X_add_number = 0;
+ }
+ else
+ {
+ if (!r->allow_seq)
+ {
+ as_bad (_ ("!%s does not use a sequence number"), p);
+ goto err_report;
+ }
+
+ input_line_pointer++;
+
+ /* Parse !sequence_number. */
+ expression (tok);
+ if (tok->X_op != O_constant || tok->X_add_number <= 0)
+ {
+ as_bad (_ ("Bad sequence number: !%s!%s"), r->name,
+ input_line_pointer);
+ goto err_report;
+ }
+ }
+
+ tok->X_op = r->op;
+ reloc_found_p = 1;
+ ++tok;
+ break;
+#endif /* RELOC_OP_P */
+
+ case ',':
+ ++input_line_pointer;
+ if (saw_comma || !saw_arg)
+ goto err;
+ saw_comma = 1;
+ break;
+
+ case '(': {
+ char *hold = input_line_pointer++;
+
+ /* First try for parenthesized register ... */
+ expression (tok);
+ resolve_register (tok);
+ if (*input_line_pointer == ')' && tok->X_op == O_register)
+ {
+ tok->X_op = (saw_comma ? O_cpregister : O_pregister);
+ saw_comma = 0;
+ saw_arg = 1;
+ ++input_line_pointer;
+ ++tok;
+ break;
+ }
+
+ /* ... then fall through to plain expression. */
+ input_line_pointer = hold;
+ }
+ /* Fall through. */
+
+ default:
+ if (saw_arg && !saw_comma)
+ goto err;
+
+ expression (tok);
+ if (tok->X_op == O_illegal || tok->X_op == O_absent)
+ goto err;
+
+ resolve_register (tok);
+
+ saw_comma = 0;
+ saw_arg = 1;
+ ++tok;
+ break;
+ }
+ }
+
+fini:
+ if (saw_comma)
+ goto err;
+ input_line_pointer = old_input_line_pointer;
+
+#ifdef DEBUG_SW_64
+ debug_exp (orig_tok, ntok - (end_tok - tok));
+#endif
+#ifdef RELOC_OP_P
+ is_end_of_line[(unsigned char) '!'] = 0;
+#endif
+
+ return ntok - (end_tok - tok);
+
+err:
+#ifdef RELOC_OP_P
+ is_end_of_line[(unsigned char) '!'] = 0;
+#endif
+ input_line_pointer = old_input_line_pointer;
+ return TOKENIZE_ERROR;
+
+#ifdef RELOC_OP_P
+err_report:
+ is_end_of_line[(unsigned char) '!'] = 0;
+#endif
+ input_line_pointer = old_input_line_pointer;
+ return TOKENIZE_ERROR_REPORT;
+}
+
+/* Search forward through all variants of an opcode looking for a
+ syntax match. */
+
+static const struct sw_64_opcode *
+find_opcode_match (const struct sw_64_opcode *first_opcode,
+ const expressionS *tok, int *pntok, int *pcpumatch)
+{
+ const struct sw_64_opcode *opcode = first_opcode;
+ int ntok = *pntok;
+ int got_cpu_match = 0;
+
+ do
+ {
+ const unsigned char *opidx;
+ int tokidx = 0;
+
+ /* Don't match opcodes that don't exist on this architecture. */
+ if (!(opcode->flags & sw_64_target))
+ goto match_failed;
+
+ got_cpu_match = 1;
+
+ for (opidx = opcode->operands; *opidx; ++opidx)
+ {
+ const struct sw_64_operand *operand = &sw_64_operands[*opidx];
+
+ /* Only take input from real operands. */
+ if (operand->flags & AXP_OPERAND_FAKE)
+ continue;
+
+ /* When we expect input, make sure we have it. */
+ if (tokidx >= ntok)
+ {
+	      /* -pal: not allowed to use the default result register.  */
+ if (pal_org_backwrards)
+ goto match_failed;
+ else if ((operand->flags & AXP_OPERAND_OPTIONAL_MASK) == 0)
+ goto match_failed;
+ continue;
+ }
+
+ /* Match operand type with expression type. */
+ switch (operand->flags & AXP_OPERAND_TYPECHECK_MASK)
+ {
+ case AXP_OPERAND_IR:
+ if (tok[tokidx].X_op != O_register
+ || !is_ir_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ break;
+ case AXP_OPERAND_FPR:
+ if (tok[tokidx].X_op != O_register
+ || !is_fpr_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ break;
+ case AXP_OPERAND_IR | AXP_OPERAND_PARENS:
+ if (tok[tokidx].X_op != O_pregister
+ || !is_ir_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ break;
+ case AXP_OPERAND_IR | AXP_OPERAND_PARENS | AXP_OPERAND_COMMA:
+ if (tok[tokidx].X_op != O_cpregister
+ || !is_ir_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ break;
+
+ case AXP_OPERAND_RELATIVE:
+ case AXP_OPERAND_SIGNED:
+ case AXP_OPERAND_UNSIGNED:
+ switch (tok[tokidx].X_op)
+ {
+ case O_illegal:
+ case O_absent:
+ case O_register:
+ case O_pregister:
+ case O_cpregister:
+ goto match_failed;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ /* Everything else should have been fake. */
+ abort ();
+ }
+ ++tokidx;
+ }
+
+ /* Possible match -- did we use all of our input? */
+ if (tokidx == ntok)
+ {
+ *pntok = ntok;
+ return opcode;
+ }
+
+ match_failed:;
+ } while (++opcode - sw_64_opcodes < (int) sw_64_num_opcodes
+ && !strcmp (opcode->name, first_opcode->name));
+
+ if (*pcpumatch)
+ *pcpumatch = got_cpu_match;
+
+ return NULL;
+}
+
+/* Given an opcode name and a pre-tokenized set of arguments, assemble
+ the insn, but do not emit it.
+
+ Note that this implies no macros allowed, since we can't store more
+ than one insn in an insn structure. */
+
+static void
+assemble_tokens_to_insn (const char *opname, const expressionS *tok, int ntok,
+ struct sw_64_insn *insn)
+{
+ const struct sw_64_opcode *opcode;
+
+ /* Search opcodes. */
+ opcode
+ = (const struct sw_64_opcode *) str_hash_find (sw_64_opcode_hash, opname);
+ if (opcode)
+ {
+ int cpumatch;
+ opcode = find_opcode_match (opcode, tok, &ntok, &cpumatch);
+ if (opcode)
+ {
+ assemble_insn (opcode, tok, ntok, insn, BFD_RELOC_UNUSED);
+ return;
+ }
+ else if (cpumatch)
+ as_bad (_ ("inappropriate arguments for opcode `%s'"), opname);
+ else
+ as_bad (_ ("opcode `%s' not supported for target %s"), opname,
+ sw_64_target_name);
+ }
+ else
+ as_bad (_ ("unknown opcode `%s'"), opname);
+}
+
+/* Build a BFD section with its flags set appropriately for the .lita,
+ .lit8, or .lit4 sections. */
+
+static void
+create_literal_section (const char *name, segT *secp, symbolS **symp)
+{
+ segT current_section = now_seg;
+ int current_subsec = now_subseg;
+ segT new_sec;
+
+ *secp = new_sec = subseg_new (name, 0);
+ subseg_set (current_section, current_subsec);
+ bfd_set_section_alignment (new_sec, 4);
+ bfd_set_section_flags (new_sec, (SEC_RELOC | SEC_ALLOC | SEC_LOAD
+ | SEC_READONLY | SEC_DATA));
+
+ S_CLEAR_EXTERNAL (*symp = section_symbol (new_sec));
+}
+
+/* Load a (partial) expression into a target register.
+
+ If poffset is not null, after the call it will either contain
+ O_constant 0, or a 16-bit offset appropriate for any MEM format
+ instruction. In addition, pbasereg will be modified to point to
+ the base register to use in that MEM format instruction.
+
+ In any case, *pbasereg should contain a base register to add to the
+ expression. This will normally be either AXP_REG_ZERO or
+ sw_64_gp_register. Symbol addresses will always be loaded via $gp,
+ so "foo ($0)" is interpreted as adding the address of foo to $0;
+ i.e. "ldl $targ, LIT ($gp); addl $targ, $0, $targ". Odd, perhaps,
+ but this is what OSF/1 does.
+
+ If explicit relocations of the form !literal!<number> are allowed,
+   and used, then explicit_reloc will be an expression pointer.
+
+ Finally, the return value is nonzero if the calling macro may emit
+ a LITUSE reloc if otherwise appropriate; the return value is the
+ sequence number to use. */
+
+static long
+load_expression (int targreg, const expressionS *exp, int *pbasereg,
+ expressionS *poffset, const char *opname)
+{
+ long emit_lituse = 0;
+ offsetT addend = exp->X_add_number;
+ int basereg = *pbasereg;
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+
+ switch (exp->X_op)
+ {
+ case O_symbol: {
+#ifdef OBJ_ECOFF
+ offsetT lit;
+
+ /* Attempt to reduce .lit load by splitting the offset from
+ its symbol when possible, but don't create a situation in
+ which we'd fail. */
+ if (!range_signed_32 (addend)
+ && (sw_64_noat_on || targreg == AXP_REG_AT))
+ {
+ lit = add_to_literal_pool (exp->X_add_symbol, addend,
+ sw_64_lita_section, 8);
+ addend = 0;
+ }
+ else
+ lit
+ = add_to_literal_pool (exp->X_add_symbol, 0, sw_64_lita_section, 8);
+
+ if (lit >= 0x8000)
+ as_fatal (_ ("overflow in literal (.lita) table"));
+
+ /* Emit "ldl r, lit (gp)". */
+
+ if (basereg != sw_64_gp_register && targreg == basereg)
+ {
+ if (sw_64_noat_on)
+ as_warn (_ ("macro requires $at register while noat in effect"));
+ if (targreg == AXP_REG_AT)
+ as_warn (_ ("macro requires $at while $at in use"));
+
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ }
+ else
+ set_tok_reg (newtok[0], targreg);
+
+ set_tok_sym (newtok[1], sw_64_lita_symbol, lit);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+
+ assemble_tokens_to_insn ("ldl", newtok, 3, &insn);
+
+ gas_assert (insn.nfixups == 1);
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_LITERAL;
+ insn.sequence = emit_lituse = next_sequence_num--;
+#endif /* OBJ_ECOFF */
+#ifdef OBJ_ELF
+ /* Emit "ldl r, gotoff (gp)". */
+
+ if (basereg != sw_64_gp_register && targreg == basereg)
+ {
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+ if (targreg == AXP_REG_AT)
+ as_bad (_ ("macro requires $at while $at in use"));
+
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ }
+ else
+ set_tok_reg (newtok[0], targreg);
+
+ /* XXX: Disable this .got minimizing optimization so that we can get
+ better instruction offset knowledge in the compiler. This happens
+ very infrequently anyway. */
+ if (1
+ || (!range_signed_32 (addend)
+ && (sw_64_noat_on || targreg == AXP_REG_AT)))
+ {
+ newtok[1] = *exp;
+ addend = 0;
+ }
+ else
+ set_tok_sym (newtok[1], exp->X_add_symbol, 0);
+
+ set_tok_preg (newtok[2], sw_64_gp_register);
+
+ assemble_tokens_to_insn ("ldl", newtok, 3, &insn);
+
+ gas_assert (insn.nfixups == 1);
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_ELF_LITERAL;
+ insn.sequence = emit_lituse = next_sequence_num--;
+#endif /* OBJ_ELF */
+#ifdef OBJ_EVAX
+ /* Find symbol or symbol pointer in link section. */
+
+ if (exp->X_add_symbol == sw_64_evax_proc->symbol)
+ {
+ /* Linkage-relative expression. */
+ set_tok_reg (newtok[0], targreg);
+
+ if (range_signed_16 (addend))
+ {
+ set_tok_const (newtok[1], addend);
+ addend = 0;
+ }
+ else
+ {
+ set_tok_const (newtok[1], 0);
+ }
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens_to_insn ("ldi", newtok, 3, &insn);
+ }
+ else
+ {
+ const char *symname = S_GET_NAME (exp->X_add_symbol);
+ const char *ptr1, *ptr2;
+ int symlen = strlen (symname);
+
+ if ((symlen > 4
+ && strcmp (ptr2 = &symname[symlen - 4], "..lk") == 0))
+ {
+ /* Access to an item whose address is stored in the linkage
+ section. Just read the address. */
+ set_tok_reg (newtok[0], targreg);
+
+ newtok[1] = *exp;
+ newtok[1].X_op = O_subtract;
+ newtok[1].X_op_symbol = sw_64_evax_proc->symbol;
+
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens_to_insn ("ldi", newtok, 3, &insn);
+ sw_64_linkage_symbol = exp->X_add_symbol;
+
+ if (poffset)
+ set_tok_const (*poffset, 0);
+
+ if (sw_64_flag_replace && targreg == 26)
+ {
+ /* Add a NOP fixup for 'ldX $26,YYY..NAME..lk'. */
+ char *ensymname;
+ symbolS *ensym;
+
+ /* Build the entry name as 'NAME..en'. */
+ ptr1 = strstr (symname, "..") + 2;
+ if (ptr1 > ptr2)
+ ptr1 = symname;
+ ensymname = XNEWVEC (char, ptr2 - ptr1 + 5);
+ memcpy (ensymname, ptr1, ptr2 - ptr1);
+ memcpy (ensymname + (ptr2 - ptr1), "..en", 5);
+
+ gas_assert (insn.nfixups + 1 <= MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = BFD_RELOC_SW_64_NOP;
+ ensym = symbol_find_or_make (ensymname);
+ free (ensymname);
+ symbol_mark_used (ensym);
+ /* The fixup must be the same as the BFD_RELOC_SW_64_BOH
+ case in emit_jsrjmp. See B.4.5.2 of the OpenVMS Linker
+ Utility Manual. */
+ insn.fixups[insn.nfixups].exp.X_op = O_symbol;
+ insn.fixups[insn.nfixups].exp.X_add_symbol = ensym;
+ insn.fixups[insn.nfixups].exp.X_add_number = 0;
+ insn.fixups[insn.nfixups].xtrasym = sw_64_linkage_symbol;
+ insn.fixups[insn.nfixups].procsym = sw_64_evax_proc->symbol;
+ insn.nfixups++;
+
+ /* ??? Force bsym to be instantiated now, as it will be
+ too late to do so in tc_gen_reloc. */
+ symbol_get_bfdsym (exp->X_add_symbol);
+ }
+ else if (sw_64_flag_replace && targreg == 27)
+ {
+ /* Add a ldi fixup for 'ldX $27,YYY.NAME..lk+8'. */
+ char *psymname;
+ symbolS *psym;
+
+ /* Extract NAME. */
+ ptr1 = strstr (symname, "..") + 2;
+ if (ptr1 > ptr2)
+ ptr1 = symname;
+ psymname = xmemdup0 (ptr1, ptr2 - ptr1);
+
+ gas_assert (insn.nfixups + 1 <= MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = BFD_RELOC_SW_64_LDA;
+ psym = symbol_find_or_make (psymname);
+ free (psymname);
+ symbol_mark_used (psym);
+ insn.fixups[insn.nfixups].exp.X_op = O_subtract;
+ insn.fixups[insn.nfixups].exp.X_add_symbol = psym;
+ insn.fixups[insn.nfixups].exp.X_op_symbol
+ = sw_64_evax_proc->symbol;
+ insn.fixups[insn.nfixups].exp.X_add_number = 0;
+ insn.fixups[insn.nfixups].xtrasym = sw_64_linkage_symbol;
+ insn.fixups[insn.nfixups].procsym = sw_64_evax_proc->symbol;
+ insn.nfixups++;
+ }
+
+ emit_insn (&insn);
+ return 0;
+ }
+ else
+ {
+ /* Not in the linkage section. Put the value into the linkage
+ section. */
+ symbolS *linkexp;
+
+ if (!range_signed_32 (addend))
+ addend = sign_extend_32 (addend);
+ linkexp = add_to_link_pool (exp->X_add_symbol, 0);
+ set_tok_reg (newtok[0], targreg);
+ set_tok_sym (newtok[1], linkexp, 0);
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens_to_insn ("ldl", newtok, 3, &insn);
+ }
+ }
+#endif /* OBJ_EVAX */
+
+ emit_insn (&insn);
+
+#ifndef OBJ_EVAX
+ if (basereg != sw_64_gp_register && basereg != AXP_REG_ZERO)
+ {
+ /* Emit "addl r, base, r". */
+
+ set_tok_reg (newtok[1], basereg);
+ set_tok_reg (newtok[2], targreg);
+ assemble_tokens ("addl", newtok, 3, 0);
+ }
+#endif
+ basereg = targreg;
+ }
+ break;
+
+ case O_constant:
+ break;
+ /* .text
+      /* .text
+	 call_hmc__tbi_addr:
+	   ldi $4, ((tbi_tbl - call_hmc__tbi_addr) & 0xFFFF)($4)
+	 tbi_tbl:
+	 The value of the label tbi_tbl cannot be computed yet, so the
+	 operator of the expression ((tbi_tbl - call_hmc__tbi_addr) & 0xFFFF)
+	 is O_bit_and rather than O_constant; we must let it through.  */
+ set_tok_reg (newtok[0], targreg);
+ newtok[1] = *exp;
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens ("ldi", newtok, 3, 0);
+ break;
+
+ case O_subtract:
+ /* Assume that this difference expression will be resolved to an
+ absolute value and that that value will fit in 16 bits. */
+
+ set_tok_reg (newtok[0], targreg);
+ newtok[1] = *exp;
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens (opname, newtok, 3, 0);
+
+ if (poffset)
+ set_tok_const (*poffset, 0);
+ return 0;
+
+ case O_big:
+ if (exp->X_add_number > 0)
+ as_bad (_ ("bignum invalid; zero assumed"));
+ else
+ as_bad (_ ("floating point number invalid; zero assumed"));
+ addend = 0;
+ break;
+
+ default:
+ as_bad (_ ("can't handle expression"));
+ addend = 0;
+ break;
+ }
+
+ if (!range_signed_32 (addend))
+ {
+#ifdef OBJ_EVAX
+ symbolS *litexp;
+#else
+ offsetT lit;
+ long seq_num = next_sequence_num--;
+#endif
+
+ /* For 64-bit addends, just put it in the literal pool. */
+#ifdef OBJ_EVAX
+ /* Emit "ldl targreg, lit (basereg)". */
+ litexp = add_to_link_pool (section_symbol (absolute_section), addend);
+ set_tok_reg (newtok[0], targreg);
+ set_tok_sym (newtok[1], litexp, 0);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens ("ldl", newtok, 3, 0);
+#else
+
+ if (sw_64_lit8_section == NULL)
+ {
+ create_literal_section (".lit8", &sw_64_lit8_section,
+ &sw_64_lit8_symbol);
+
+#ifdef OBJ_ECOFF
+ sw_64_lit8_literal = add_to_literal_pool (sw_64_lit8_symbol, 0x8000,
+ sw_64_lita_section, 8);
+ if (sw_64_lit8_literal >= 0x8000)
+ as_fatal (_ ("overflow in literal (.lita) table"));
+#endif
+ }
+
+ lit = add_to_literal_pool (NULL, addend, sw_64_lit8_section, 8) - 0x8000;
+ if (lit >= 0x8000)
+ as_fatal (_ ("overflow in literal (.lit8) table"));
+
+ /* Emit "ldi litreg, .lit8+0x8000". */
+
+ if (targreg == basereg)
+ {
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+ if (targreg == AXP_REG_AT)
+ as_bad (_ ("macro requires $at while $at in use"));
+
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ }
+ else
+ set_tok_reg (newtok[0], targreg);
+#ifdef OBJ_ECOFF
+ set_tok_sym (newtok[1], sw_64_lita_symbol, sw_64_lit8_literal);
+#endif
+#ifdef OBJ_ELF
+ set_tok_sym (newtok[1], sw_64_lit8_symbol, 0x8000);
+#endif
+ set_tok_preg (newtok[2], sw_64_gp_register);
+
+ assemble_tokens_to_insn ("ldl", newtok, 3, &insn);
+
+ gas_assert (insn.nfixups == 1);
+#ifdef OBJ_ECOFF
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_LITERAL;
+#endif
+#ifdef OBJ_ELF
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_ELF_LITERAL;
+#endif
+ insn.sequence = seq_num;
+
+ emit_insn (&insn);
+
+ /* Emit "ldl litreg, lit (litreg)". */
+
+ set_tok_const (newtok[1], lit);
+ set_tok_preg (newtok[2], newtok[0].X_add_number);
+
+ assemble_tokens_to_insn ("ldl", newtok, 3, &insn);
+
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BASE;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = seq_num;
+ emit_lituse = 0;
+
+ emit_insn (&insn);
+
+ /* Emit "addl litreg, base, target". */
+
+ if (basereg != AXP_REG_ZERO)
+ {
+ set_tok_reg (newtok[1], basereg);
+ set_tok_reg (newtok[2], targreg);
+ assemble_tokens ("addl", newtok, 3, 0);
+ }
+#endif /* !OBJ_EVAX */
+
+ if (poffset)
+ set_tok_const (*poffset, 0);
+ *pbasereg = targreg;
+ }
+ else
+ {
+ offsetT low, high, extra, tmp;
+
+ /* For 32-bit operands, break up the addend. */
+
+ low = sign_extend_16 (addend);
+ tmp = addend - low;
+ high = sign_extend_16 (tmp >> 16);
+
+ if (tmp - (high << 16))
+ {
+ extra = 0x4000;
+ tmp -= 0x40000000;
+ high = sign_extend_16 (tmp >> 16);
+ }
+ else
+ extra = 0;
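+
+      /* Worked example: for addend 0x7fff9000, low is -0x7000 and the
+	 remaining 0x80000000 cannot be formed by one signed 16-bit ldih,
+	 so extra = 0x4000 and high = 0x4000; each ldih then adds
+	 0x40000000 and the remaining 16-bit offset of -0x7000 (emitted
+	 as an ldi or folded into the memory displacement) yields
+	 0x80000000 - 0x7000 = 0x7fff9000.  */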
+
+ set_tok_reg (newtok[0], targreg);
+ set_tok_preg (newtok[2], basereg);
+
+ if (extra)
+ {
+	  /* Emit "ldih r, extra (r)".  */
+ set_tok_const (newtok[1], extra);
+ assemble_tokens ("ldih", newtok, 3, 0);
+ set_tok_preg (newtok[2], basereg = targreg);
+ }
+
+ if (high)
+ {
+	  /* Emit "ldih r, high (r)".  */
+ set_tok_const (newtok[1], high);
+ if (newtok[0].X_add_number == 31 && newtok[0].X_op == O_register)
+	    as_warn (_ ("displacement out of range, may be incorrect"));
+ else
+ assemble_tokens ("ldih", newtok, 3, 0);
+ basereg = targreg;
+ set_tok_preg (newtok[2], basereg);
+ }
+
+ if ((low && !poffset) || (!poffset && basereg != targreg))
+ {
+ /* Emit "ldi r, low (base)". */
+ set_tok_const (newtok[1], low);
+ assemble_tokens ("ldi", newtok, 3, 0);
+ basereg = targreg;
+ low = 0;
+ }
+
+ if (poffset)
+ set_tok_const (*poffset, low);
+ *pbasereg = basereg;
+ }
+
+ return emit_lituse;
+}
+
+/* The ldi macro differs from the ldi instruction in that it handles
+ most simple expressions, particularly symbol address loads and
+ large constants. */
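+/* For example (ELF targets; register names illustrative): "ldi $1, sym"
+   with no base register is expanded via load_expression into
+   "ldl $1, sym($gp)" carrying a BFD_RELOC_SW_64_ELF_LITERAL fixup, so the
+   address is loaded from the GOT rather than built inline.  */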
+
+static void
+emit_ldi (const expressionS *tok, int ntok, const void *unused ATTRIBUTE_UNUSED)
+{
+ int basereg;
+
+ if (ntok == 2)
+ basereg = (tok[1].X_op == O_constant ? AXP_REG_ZERO : sw_64_gp_register);
+ else
+ basereg = tok[2].X_add_number;
+
+ (void) load_expression (tok[0].X_add_number, &tok[1], &basereg, NULL, "ldi");
+}
+
+/* The ldih macro differs from the ldih instruction in that it has $31
+ as an implied base register. */
+
+static void
+emit_ldih (const expressionS *tok, int ntok ATTRIBUTE_UNUSED,
+ const void *unused ATTRIBUTE_UNUSED)
+{
+ expressionS newtok[3];
+
+ newtok[0] = tok[0];
+ newtok[1] = tok[1];
+ set_tok_preg (newtok[2], AXP_REG_ZERO);
+
+ assemble_tokens ("ldih", newtok, 3, 0);
+}
+
+/* Called internally to handle all alignment needs. This takes care
+ of eliding calls to frag_align if'n the cached current alignment
+ says we've already got it, as well as taking care of the auto-align
+ feature wrt labels. */
+
+static void
+sw_64_align (int n, char *pfill, symbolS *label, int force ATTRIBUTE_UNUSED)
+{
+ if (sw_64_current_align >= n)
+ return;
+
+ if (pfill == NULL)
+ {
+ if (subseg_text_p (now_seg))
+ frag_align_code (n, 0);
+ else
+ frag_align (n, 0, 0);
+ }
+ else
+ frag_align (n, *pfill, 0);
+
+ sw_64_current_align = n;
+
+ if (label != NULL && S_GET_SEGMENT (label) == now_seg)
+ {
+ symbol_set_frag (label, frag_now);
+ S_SET_VALUE (label, (valueT) frag_now_fix ());
+ }
+
+ record_alignment (now_seg, n);
+
+ /* ??? If sw_64_flag_relax && force && elf, record the requested alignment
+ in a reloc for the linker to see. */
+}
+
+/* Actually output an instruction with its fixup. */
+
+static void
+emit_insn (struct sw_64_insn *insn)
+{
+ char *f;
+ int i;
+
+ /* Take care of alignment duties. */
+ if (sw_64_auto_align_on && sw_64_current_align < 2)
+ sw_64_align (2, (char *) NULL, sw_64_insn_label, 0);
+ if (sw_64_current_align > 2)
+ sw_64_current_align = 2;
+ sw_64_insn_label = NULL;
+
+ /* Write out the instruction. */
+ f = frag_more (4);
+ md_number_to_chars (f, insn->insn, 4);
+
+#ifdef OBJ_ELF
+ dwarf2_emit_insn (4);
+#endif
+
+ /* Apply the fixups in order. */
+ for (i = 0; i < insn->nfixups; ++i)
+ {
+ const struct sw_64_operand *operand = (const struct sw_64_operand *) 0;
+ struct sw_64_fixup *fixup = &insn->fixups[i];
+ struct sw_64_reloc_tag *info = NULL;
+ int size, pcrel;
+ fixS *fixP;
+
+ /* Some fixups are only used internally and so have no howto. */
+ if ((int) fixup->reloc < 0)
+ {
+ operand = &sw_64_operands[-(int) fixup->reloc];
+ size = 4;
+ pcrel = ((operand->flags & AXP_OPERAND_RELATIVE) != 0);
+ }
+ else if (fixup->reloc > BFD_RELOC_UNUSED
+ || fixup->reloc == BFD_RELOC_SW_64_GPDISP_HI16
+ || fixup->reloc == BFD_RELOC_SW_64_GPDISP_LO16)
+ {
+ size = 2;
+ pcrel = 0;
+ }
+ else
+ {
+ reloc_howto_type *reloc_howto
+ = bfd_reloc_type_lookup (stdoutput,
+ (bfd_reloc_code_real_type) fixup->reloc);
+ gas_assert (reloc_howto);
+
+ size = bfd_get_reloc_size (reloc_howto);
+
+ switch (fixup->reloc)
+ {
+#ifdef OBJ_EVAX
+ case BFD_RELOC_SW_64_NOP:
+ case BFD_RELOC_SW_64_BSR:
+ case BFD_RELOC_SW_64_LDA:
+ case BFD_RELOC_SW_64_BOH:
+ break;
+#endif
+ default:
+ gas_assert (size >= 1 && size <= 4);
+ }
+
+ pcrel = reloc_howto->pc_relative;
+ }
+
+ fixP = fix_new_exp (frag_now, f - frag_now->fr_literal, size, &fixup->exp,
+ pcrel, (bfd_reloc_code_real_type) fixup->reloc);
+
+ /* Turn off complaints that the addend is too large for some fixups,
+ and copy in the sequence number for the explicit relocations. */
+ switch (fixup->reloc)
+ {
+ case BFD_RELOC_SW_64_HINT:
+ case BFD_RELOC_GPREL32:
+ case BFD_RELOC_GPREL16:
+ case BFD_RELOC_SW_64_GPREL_HI16:
+ case BFD_RELOC_SW_64_GPREL_LO16:
+ case BFD_RELOC_SW_64_DTPREL_HI16:
+ case BFD_RELOC_SW_64_DTPREL_LO16:
+ case BFD_RELOC_SW_64_DTPREL16:
+ case BFD_RELOC_SW_64_TPREL_HI16:
+ case BFD_RELOC_SW_64_TPREL_LO16:
+ case BFD_RELOC_SW_64_TPREL16:
+ fixP->fx_no_overflow = 1;
+ break;
+
+ case BFD_RELOC_SW_64_GPDISP_HI16:
+ fixP->fx_no_overflow = 1;
+ fixP->fx_addsy = section_symbol (now_seg);
+ fixP->fx_offset = 0;
+
+ info = get_sw_64_reloc_tag (insn->sequence);
+ if (++info->n_master > 1)
+ as_bad (_ ("too many ldih insns for !gpdisp!%ld"), insn->sequence);
+ if (info->segment != now_seg)
+ as_bad (_ (
+ "both insns for !gpdisp!%ld must be in the same section"),
+ insn->sequence);
+ fixP->tc_fix_data.info = info;
+ break;
+
+ case BFD_RELOC_SW_64_GPDISP_LO16:
+ fixP->fx_no_overflow = 1;
+
+ info = get_sw_64_reloc_tag (insn->sequence);
+ if (++info->n_slaves > 1)
+ as_bad (_ ("too many ldi insns for !gpdisp!%ld"), insn->sequence);
+ if (info->segment != now_seg)
+ as_bad (_ (
+ "both insns for !gpdisp!%ld must be in the same section"),
+ insn->sequence);
+ fixP->tc_fix_data.info = info;
+ info->slaves = fixP;
+ break;
+
+ case BFD_RELOC_SW_64_ELF_LITERAL_GOT:
+ fixP->fx_no_overflow = 1;
+ fixP->fx_size = 4;
+
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ info->literal_got = fixP;
+ fixP->tc_fix_data.info = info;
+ break;
+
+ case BFD_RELOC_SW_64_LITERAL:
+ case BFD_RELOC_SW_64_ELF_LITERAL:
+ fixP->fx_no_overflow = 1;
+ if ((frags_pre_fixup != NULL)
+ && (frags_pre_fixup->fx_r_type
+ == BFD_RELOC_SW_64_ELF_LITERAL_GOT))
+ fixP->fx_size = 4;
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ info->master = fixP;
+ info->n_master++;
+ if (info->segment != now_seg)
+ info->multi_section_p = 1;
+ fixP->tc_fix_data.info = info;
+ break;
+
+#ifdef RELOC_OP_P
+ case DUMMY_RELOC_LITUSE_ADDR:
+ fixP->fx_offset = LITUSE_SW_64_ADDR;
+ goto do_lituse;
+ case DUMMY_RELOC_LITUSE_BASE:
+ fixP->fx_offset = LITUSE_SW_64_BASE;
+ goto do_lituse;
+ case DUMMY_RELOC_LITUSE_BYTOFF:
+ fixP->fx_offset = LITUSE_SW_64_BYTOFF;
+ goto do_lituse;
+ case DUMMY_RELOC_LITUSE_JSR:
+ fixP->fx_offset = LITUSE_SW_64_JSR;
+ goto do_lituse;
+ case DUMMY_RELOC_LITUSE_TLSGD:
+ fixP->fx_offset = LITUSE_SW_64_TLSGD;
+ goto do_lituse;
+ case DUMMY_RELOC_LITUSE_TLSLDM:
+ fixP->fx_offset = LITUSE_SW_64_TLSLDM;
+ goto do_lituse;
+ case DUMMY_RELOC_LITUSE_JSRDIRECT:
+ fixP->fx_offset = LITUSE_SW_64_JSRDIRECT;
+ goto do_lituse;
+ do_lituse:
+ fixP->fx_addsy = section_symbol (now_seg);
+ fixP->fx_r_type = BFD_RELOC_SW_64_LITUSE;
+
+ info = get_sw_64_reloc_tag (insn->sequence);
+ if (fixup->reloc == DUMMY_RELOC_LITUSE_TLSGD)
+ info->saw_lu_tlsgd = 1;
+ else if (fixup->reloc == DUMMY_RELOC_LITUSE_TLSLDM)
+ info->saw_lu_tlsldm = 1;
+ if (++info->n_slaves > 1)
+ {
+ if (info->saw_lu_tlsgd)
+ as_bad (_ ("too many lituse insns for !lituse_tlsgd!%ld"),
+ insn->sequence);
+ else if (info->saw_lu_tlsldm)
+ as_bad (_ ("too many lituse insns for !lituse_tlsldm!%ld"),
+ insn->sequence);
+ }
+ fixP->tc_fix_data.info = info;
+ fixP->tc_fix_data.next_reloc = info->slaves;
+ info->slaves = fixP;
+ if (info->segment != now_seg)
+ info->multi_section_p = 1;
+ break;
+
+ case BFD_RELOC_SW_64_TLSREL_GOT:
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ info->tlsrel_got = fixP;
+ fixP->tc_fix_data.info = info;
+ break;
+
+ case BFD_RELOC_SW_64_TLSGD:
+ fixP->fx_no_overflow = 1;
+
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ if (info->saw_tlsgd)
+ as_bad (_ ("duplicate !tlsgd!%ld"), insn->sequence);
+ else if (info->saw_tlsldm)
+ as_bad (_ ("sequence number in use for !tlsldm!%ld"),
+ insn->sequence);
+ else
+ info->saw_tlsgd = 1;
+ info->tlsgd = fixP;
+ fixP->tc_fix_data.info = info;
+ break;
+
+ case BFD_RELOC_SW_64_TLSLDM:
+ fixP->fx_no_overflow = 1;
+
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ if (info->saw_tlsldm)
+ as_bad (_ ("duplicate !tlsldm!%ld"), insn->sequence);
+ else if (info->saw_tlsgd)
+ as_bad (_ ("sequence number in use for !tlsgd!%ld"),
+ insn->sequence);
+ else
+ info->saw_tlsldm = 1;
+ info->tlsldm = fixP;
+ fixP->tc_fix_data.info = info;
+ break;
+#endif
+ case BFD_RELOC_SW_64_GOTDTPREL16:
+ fixP->fx_no_overflow = 1;
+
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ info->gotdtprel = fixP;
+ info->master = fixP;
+ info->n_master++;
+ fixP->tc_fix_data.info = info;
+ break;
+
+ case BFD_RELOC_SW_64_GOTTPREL16:
+ fixP->fx_no_overflow = 1;
+
+ if (insn->sequence == 0)
+ break;
+ info = get_sw_64_reloc_tag (insn->sequence);
+ info->gottprel = fixP;
+ info->master = fixP;
+ info->n_master++;
+ fixP->tc_fix_data.info = info;
+ break;
+#ifdef OBJ_EVAX
+ case BFD_RELOC_SW_64_NOP:
+ case BFD_RELOC_SW_64_LDA:
+ case BFD_RELOC_SW_64_BSR:
+ case BFD_RELOC_SW_64_BOH:
+ info = get_sw_64_reloc_tag (next_sequence_num--);
+ fixP->tc_fix_data.info = info;
+ fixP->tc_fix_data.info->sym = fixup->xtrasym;
+ fixP->tc_fix_data.info->psym = fixup->procsym;
+ break;
+#endif
+
+ default:
+ if ((int) fixup->reloc < 0)
+ {
+ if (operand->flags & AXP_OPERAND_NOOVERFLOW)
+ fixP->fx_no_overflow = 1;
+ }
+ break;
+ }
+ }
+}
+
+/* Insert an operand value into an instruction. */
+
+static unsigned
+insert_operand (unsigned insn, const struct sw_64_operand *operand, offsetT val,
+ const char *file, unsigned line)
+{
+ if (operand->bits != 32 && !(operand->flags & AXP_OPERAND_NOOVERFLOW))
+ {
+ offsetT min, max;
+
+ if (operand->flags & AXP_OPERAND_SIGNED)
+ {
+ max = (1 << (operand->bits - 1)) - 1;
+ min = -(1 << (operand->bits - 1));
+ }
+ else
+ {
+ max = (1 << operand->bits) - 1;
+ min = 0;
+ }
+
+ if (val < min || val > max)
+ as_warn_value_out_of_range (_ ("operand"), val, min, max, file, line);
+ }
+
+ if (operand->insert)
+ {
+ const char *errmsg = NULL;
+
+ insn = (*operand->insert) (insn, val, &errmsg);
+ if (errmsg)
+ as_warn ("%s", errmsg);
+ }
+ else
+ insn |= ((val & ((1 << operand->bits) - 1)) << operand->shift);
+
+ return insn;
+}
+
+static unsigned int need_rd_f = 0;
+static unsigned int next_insn = 0;
+static unsigned int register1 = 0;
+static char lstname[5];
+/* Turn an opcode description and a set of arguments into
+ an instruction and a fixup. */
+
+static void
+assemble_insn (const struct sw_64_opcode *opcode, const expressionS *tok,
+ int ntok, struct sw_64_insn *insn,
+ extended_bfd_reloc_code_real_type reloc)
+{
+ const struct sw_64_operand *reloc_operand = NULL;
+ const expressionS *reloc_exp = NULL;
+ const unsigned char *argidx;
+ unsigned image;
+ int tokidx = 0;
+ next_insn++;
+
+ memset (insn, 0, sizeof (*insn));
+ image = opcode->opcode;
+
+ for (argidx = opcode->operands; *argidx; ++argidx)
+ {
+ const struct sw_64_operand *operand = &sw_64_operands[*argidx];
+ const expressionS *t = (const expressionS *) 0;
+
+ if (operand->flags & AXP_OPERAND_FAKE)
+ {
+ /* Fake operands take no value and generate no fixup. */
+ image = insert_operand (image, operand, 0, NULL, 0);
+ continue;
+ }
+
+ if (tokidx >= ntok)
+ {
+ switch (operand->flags & AXP_OPERAND_OPTIONAL_MASK)
+ {
+ case AXP_OPERAND_DEFAULT_FIRST:
+ t = &tok[0];
+ break;
+ case AXP_OPERAND_DEFAULT_SECOND:
+ t = &tok[1];
+ break;
+ case AXP_OPERAND_DEFAULT_THIRD:
+ t = &tok[2];
+ break;
+ case AXP_OPERAND_DEFAULT_ZERO: {
+ static expressionS zero_exp;
+ t = &zero_exp;
+ zero_exp.X_op = O_constant;
+ zero_exp.X_unsigned = 1;
+ }
+ break;
+ default:
+ abort ();
+ }
+ }
+ else
+ t = &tok[tokidx++];
+
+ switch (t->X_op)
+ {
+ case O_register:
+ case O_pregister:
+ case O_cpregister:
+ image
+ = insert_operand (image, operand, regno (t->X_add_number), NULL, 0);
+ break;
+
+ case O_constant:
+ image = insert_operand (image, operand, t->X_add_number, NULL, 0);
+ reloc_operand = operand;
+ reloc_exp = t;
+ break;
+
+ default:
+ /* This is only 0 for fields that should contain registers,
+ which means this pattern shouldn't have matched. */
+ if (operand->default_reloc == 0)
+ abort ();
+
+ /* There is one special case for which an insn receives two
+ relocations, and thus the user-supplied reloc does not
+ override the operand reloc. */
+ if (operand->default_reloc == BFD_RELOC_SW_64_HINT)
+ {
+ struct sw_64_fixup *fixup;
+
+ if (insn->nfixups >= MAX_INSN_FIXUPS)
+ as_fatal (_ ("too many fixups"));
+
+ fixup = &insn->fixups[insn->nfixups++];
+ fixup->exp = *t;
+ fixup->reloc = BFD_RELOC_SW_64_HINT;
+ }
+ else
+ {
+ if (reloc == BFD_RELOC_UNUSED)
+ reloc = operand->default_reloc;
+
+ gas_assert (reloc_operand == NULL);
+ reloc_operand = operand;
+ reloc_exp = t;
+ }
+ break;
+ }
+ }
+
+ if (reloc != BFD_RELOC_UNUSED)
+ {
+ struct sw_64_fixup *fixup;
+
+ if (insn->nfixups >= MAX_INSN_FIXUPS)
+ as_fatal (_ ("too many fixups"));
+
+ /* ??? My but this is hacky. But the OSF/1 assembler uses the same
+ relocation tag for both ldih and ldi with gpdisp. Choose the
+ correct internal relocation based on the opcode. */
+ if (reloc == BFD_RELOC_SW_64_GPDISP)
+ {
+ if (strcmp (opcode->name, "ldih") == 0)
+ reloc = BFD_RELOC_SW_64_GPDISP_HI16;
+ else if (strcmp (opcode->name, "ldi") == 0)
+ reloc = BFD_RELOC_SW_64_GPDISP_LO16;
+ else
+ as_bad (_ ("invalid relocation for instruction"));
+ }
+
+ /* If this is a real relocation (as opposed to a lituse hint), then
+ the relocation width should match the operand width.
+ Take care of -MDISP in operand table. */
+ else if (reloc < BFD_RELOC_UNUSED && reloc > 0)
+ {
+ if (reloc == BFD_RELOC_SW_64_BRSGP
+ && (strcmp (opcode->name, "lbr") == 0))
+ reloc = BFD_RELOC_SW_64_BR26;
+
+ reloc_howto_type *reloc_howto
+ = bfd_reloc_type_lookup (stdoutput,
+ (bfd_reloc_code_real_type) reloc);
+ if (reloc_operand == NULL
+ || reloc_howto->bitsize != reloc_operand->bits)
+ {
+ as_bad (_ ("invalid relocation for field"));
+ return;
+ }
+ }
+
+ fixup = &insn->fixups[insn->nfixups++];
+ if (reloc_exp)
+ fixup->exp = *reloc_exp;
+ else
+ fixup->exp.X_op = O_absent;
+ fixup->reloc = reloc;
+ }
+
+ insn->insn = image;
+
+ if (!strcmp (sw_64_target_name, "sw6a") || !strcmp (sw_64_target_name, "sw6b")
+ || !strcmp (sw_64_target_name, "sw8a"))
+ {
+ if (!strcmp (opcode->name, "lstw") || !strcmp (opcode->name, "lstl")
+ || !strcmp (opcode->name, "stl_c") || !strcmp (opcode->name, "stq_c"))
+ {
+ sw_64_align (3, (char *) NULL, sw_64_insn_label, 0);
+ need_rd_f = 1;
+ next_insn = 0;
+ register1 = tok[0].X_add_number;
+ strcpy (lstname, opcode->name);
+ }
+ if (!strcmp (sw_64_target_name, "sw6a")
+ || !strcmp (sw_64_target_name, "sw6b"))
+ {
+ if (strcmp (opcode->name, "rd_f") && need_rd_f && (next_insn == 1))
+ as_bad (_ ("missing \"rd_f\" before \"%s\" !!"),
+ (char *) opcode->name);
+ }
+ if (!strcmp (opcode->name, "rd_f") && (register1 != tok[0].X_add_number))
+ as_bad (_ (" %s and rd_f the first register must be the same"),
+ lstname);
+ }
+}
+
+/* Handle all "simple" integer register loads -- ldl, ldl_l, ldl_u,
+ etc. They differ from the real instructions in that they do simple
+ expressions like the ldi macro. */
+
+static void
+emit_ir_load (const expressionS *tok, int ntok, const void *opname)
+{
+ int basereg;
+ long lituse;
+ expressionS newtok[3];
+ struct sw_64_insn insn;
+ const char *symname
+ = tok[1].X_add_symbol ? S_GET_NAME (tok[1].X_add_symbol) : "";
+ int symlen = strlen (symname);
+
+ if (ntok == 2)
+ basereg = (tok[1].X_op == O_constant ? AXP_REG_ZERO : sw_64_gp_register);
+ else
+ basereg = tok[2].X_add_number;
+
+ lituse = load_expression (tok[0].X_add_number, &tok[1], &basereg, &newtok[1],
+ (const char *) opname);
+
+ if (basereg == sw_64_gp_register
+ && (symlen > 4 && strcmp (&symname[symlen - 4], "..lk") == 0))
+ return;
+
+ newtok[0] = tok[0];
+ set_tok_preg (newtok[2], basereg);
+
+ assemble_tokens_to_insn ((const char *) opname, newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BASE;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+}
+
+/* Handle fp register loads, and both integer and fp register stores.
+ Again, we handle simple expressions. */
+
+static void
+emit_loadstore (const expressionS *tok, int ntok, const void *opname)
+{
+ int basereg;
+ long lituse;
+ expressionS newtok[3];
+ struct sw_64_insn insn;
+
+ if (ntok == 2)
+ basereg = (tok[1].X_op == O_constant ? AXP_REG_ZERO : sw_64_gp_register);
+ else
+ basereg = tok[2].X_add_number;
+
+ if (tok[1].X_op != O_constant || !range_signed_16 (tok[1].X_add_number))
+ {
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+ else
+ as_warn (_ ("assembler requires $28 register for the marco !"));
+
+ lituse = load_expression (AXP_REG_AT, &tok[1], &basereg, &newtok[1],
+ (const char *) opname);
+ }
+ else
+ {
+ newtok[1] = tok[1];
+ lituse = 0;
+ }
+
+ newtok[0] = tok[0];
+ set_tok_preg (newtok[2], basereg);
+
+ assemble_tokens_to_insn ((const char *) opname, newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BASE;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+}
+
+/* Load a half-word or byte as an unsigned value. */
+
+static void
+emit_ldXu (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ if (sw_64_target & AXP_OPCODE_SW6)
+ emit_ir_load (tok, ntok, ldXu_op[(long) vlgsize]);
+ else
+ {
+ expressionS newtok[3];
+ struct sw_64_insn insn;
+ int basereg;
+ long lituse;
+
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+
+ if (ntok == 2)
+ basereg
+ = (tok[1].X_op == O_constant ? AXP_REG_ZERO : sw_64_gp_register);
+ else
+ basereg = tok[2].X_add_number;
+
+ /* Emit "ldi $at, exp". */
+ lituse = load_expression (AXP_REG_AT, &tok[1], &basereg, NULL, "ldi");
+
+ /* Emit "ldl_u targ, 0 ($at)". */
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens_to_insn ("ldl_u", newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BASE;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+
+ /* Emit "extXl targ, $at, targ". */
+ set_tok_reg (newtok[1], basereg);
+ newtok[2] = newtok[0];
+ assemble_tokens_to_insn (extXl_op[(long) vlgsize], newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BYTOFF;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+ }
+}
+
+/* Load a half-word or byte as a signed value. */
+
+static void
+emit_ldX (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ emit_ldXu (tok, ntok, vlgsize);
+ assemble_tokens (sextX_op[(long) vlgsize], tok, 1, 1);
+}
+
+/* Load an integral value from an unaligned address as an unsigned
+ value. */
+
+static void
+emit_uldXu (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ long lgsize = (long) vlgsize;
+ expressionS newtok[3];
+
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+
+ /* Emit "ldi $at, exp". */
+ memcpy (newtok, tok, sizeof (expressionS) * ntok);
+ newtok[0].X_add_number = AXP_REG_AT;
+ assemble_tokens ("ldi", newtok, ntok, 1);
+
+ /* Emit "ldl_u $t9, 0 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], AXP_REG_AT);
+ assemble_tokens ("ldl_u", newtok, 3, 1);
+
+ /* Emit "ldl_u $t10, size-1 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_const (newtok[1], (1 << lgsize) - 1);
+ assemble_tokens ("ldl_u", newtok, 3, 1);
+
+ /* Emit "extXl $t9, $at, $t9". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_reg (newtok[1], AXP_REG_AT);
+ set_tok_reg (newtok[2], AXP_REG_T9);
+ assemble_tokens (extXl_op[lgsize], newtok, 3, 1);
+
+ /* Emit "extXh $t10, $at, $t10". */
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_reg (newtok[2], AXP_REG_T10);
+ assemble_tokens (extXh_op[lgsize], newtok, 3, 1);
+
+ /* Emit "or $t9, $t10, targ". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_reg (newtok[1], AXP_REG_T10);
+ newtok[2] = tok[0];
+ assemble_tokens ("or", newtok, 3, 1);
+}
+
+/* Load an integral value from an unaligned address as a signed value.
+ Note that quads should get funneled to the unsigned load since we
+ don't have to do the sign extension. */
+
+static void
+emit_uldX (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ emit_uldXu (tok, ntok, vlgsize);
+ assemble_tokens (sextX_op[(long) vlgsize], tok, 1, 1);
+}
+
+/* Implement the ldil macro. */
+
+static void
+emit_ldil (const expressionS *tok, int ntok,
+ const void *unused ATTRIBUTE_UNUSED)
+{
+ expressionS newtok[2];
+
+ memcpy (newtok, tok, sizeof (newtok));
+ newtok[1].X_add_number = sign_extend_32 (tok[1].X_add_number);
+
+ assemble_tokens ("ldi", newtok, ntok, 1);
+}
+
+/* Store a half-word or byte. */
+
+static void
+emit_stX (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ int lgsize = (int) (long) vlgsize;
+
+ if (sw_64_target & AXP_OPCODE_SW6)
+ emit_loadstore (tok, ntok, stX_op[lgsize]);
+ else
+ {
+ expressionS newtok[3];
+ struct sw_64_insn insn;
+ int basereg;
+ long lituse;
+
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+
+ if (ntok == 2)
+ basereg
+ = (tok[1].X_op == O_constant ? AXP_REG_ZERO : sw_64_gp_register);
+ else
+ basereg = tok[2].X_add_number;
+
+ /* Emit "ldi $at, exp". */
+ lituse = load_expression (AXP_REG_AT, &tok[1], &basereg, NULL, "ldi");
+
+ /* Emit "ldl_u $t9, 0 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], basereg);
+ assemble_tokens_to_insn ("ldl_u", newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BASE;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+
+ /* Emit "insXl src, $at, $t10". */
+ newtok[0] = tok[0];
+ set_tok_reg (newtok[1], basereg);
+ set_tok_reg (newtok[2], AXP_REG_T10);
+ assemble_tokens_to_insn (insXl_op[lgsize], newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BYTOFF;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+
+ /* Emit "mskXl $t9, $at, $t9". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ newtok[2] = newtok[0];
+ assemble_tokens_to_insn (mskXl_op[lgsize], newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BYTOFF;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+
+ /* Emit "or $t9, $t10, $t9". */
+ set_tok_reg (newtok[1], AXP_REG_T10);
+ assemble_tokens ("or", newtok, 3, 1);
+
+ /* Emit "stq_u $t9, 0 ($at). */
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], AXP_REG_AT);
+ assemble_tokens_to_insn ("stl_u", newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_BASE;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+ emit_insn (&insn);
+ }
+}
+
+/* Store an integer to an unaligned address. */
+
+static void
+emit_ustX (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ int lgsize = (int) (long) vlgsize;
+ expressionS newtok[3];
+
+ /* Emit "ldi $at, exp". */
+ memcpy (newtok, tok, sizeof (expressionS) * ntok);
+ newtok[0].X_add_number = AXP_REG_AT;
+ assemble_tokens ("ldi", newtok, ntok, 1);
+
+ /* Emit "ldl_u $9, 0 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], AXP_REG_AT);
+ assemble_tokens ("ldl_u", newtok, 3, 1);
+
+ /* Emit "ldl_u $10, size-1 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_const (newtok[1], (1 << lgsize) - 1);
+ assemble_tokens ("ldl_u", newtok, 3, 1);
+
+ /* Emit "insXl src, $at, $t11". */
+ newtok[0] = tok[0];
+ set_tok_reg (newtok[1], AXP_REG_AT);
+ set_tok_reg (newtok[2], AXP_REG_T11);
+ assemble_tokens (insXl_op[lgsize], newtok, 3, 1);
+
+ /* Emit "insXh src, $at, $t12". */
+ set_tok_reg (newtok[2], AXP_REG_T12);
+ assemble_tokens (insXh_op[lgsize], newtok, 3, 1);
+
+ /* Emit "mskXl $t9, $at, $t9". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ newtok[2] = newtok[0];
+ assemble_tokens (mskXl_op[lgsize], newtok, 3, 1);
+
+ /* Emit "mskXh $t10, $at, $t10". */
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ newtok[2] = newtok[0];
+ assemble_tokens (mskXh_op[lgsize], newtok, 3, 1);
+
+ /* Emit "or $t9, $t11, $t9". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_reg (newtok[1], AXP_REG_T11);
+ newtok[2] = newtok[0];
+ assemble_tokens ("or", newtok, 3, 1);
+
+ /* Emit "or $t10, $t12, $t10". */
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_reg (newtok[1], AXP_REG_T12);
+ newtok[2] = newtok[0];
+ assemble_tokens ("or", newtok, 3, 1);
+
+ /* Emit "stq_u $t10, size-1 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_const (newtok[1], (1 << lgsize) - 1);
+ set_tok_preg (newtok[2], AXP_REG_AT);
+ assemble_tokens ("stl_u", newtok, 3, 1);
+
+ /* Emit "stq_u $t9, 0 ($at)". */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_const (newtok[1], 0);
+ assemble_tokens ("stl_u", newtok, 3, 1);
+}
+
+/* Sign extend a half-word or byte. The 32-bit sign extend is
+ implemented as "addl $31, $r, $t" in the opcode table. */
+
+static void
+emit_sextX (const expressionS *tok, int ntok, const void *vlgsize)
+{
+ long lgsize = (long) vlgsize;
+
+ if (sw_64_target & AXP_OPCODE_SW6)
+ assemble_tokens (sextX_op[lgsize], tok, ntok, 0);
+ else
+ {
+ int bitshift = 64 - 8 * (1 << lgsize);
+ expressionS newtok[3];
+
+ /* Emit "sll src,bits,dst". */
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], bitshift);
+ newtok[2] = tok[ntok - 1];
+ assemble_tokens ("sll", newtok, 3, 1);
+
+ /* Emit "sra dst,bits,dst". */
+ newtok[0] = newtok[2];
+ assemble_tokens ("sra", newtok, 3, 1);
+ }
+}
+
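+/* Expand the "vlogXX" pseudo: assemble a plain "vlog" from the first four
+   operands, then merge in the 8-bit truth value passed as the synthesized
+   fifth token -- its top two bits land at bit 26 and its low six bits at
+   bit 10 of the encoding.  */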
+static void
+emit_vlogx (const expressionS *tok, int ntok,
+ const void *unused ATTRIBUTE_UNUSED)
+{
+ unsigned int mask = 0;
+ struct sw_64_insn insn;
+ assemble_tokens_to_insn ("vlog", tok, ntok - 1, &insn);
+ mask = (tok[4].X_add_number >> 6) << 26;
+ mask += (tok[4].X_add_number & 0x3f) << 10;
+ insn.insn |= mask;
+ emit_insn (&insn);
+}
+
+/* Implement the division and modulus macros. */
+
+#ifdef OBJ_EVAX
+
+/* Make register usage like in normal procedure call.
+ Don't clobber PV and RA. */
+
+static void
+emit_division (const expressionS *tok, int ntok, const void *symname)
+{
+ if (strcmp (sw_64_target_name, "sw6a") == 0
+ || strcmp (sw_64_target_name, "sw6b") == 0)
+ {
+ /* DIVISION and MODULUS. Yech.
+
+ Convert
+ OP x,y,result
+ to
+ mov x,R16 # if x != R16
+ mov y,R17 # if y != R17
+ ldi AT,__OP
+ call AT,(AT),0
+ mov R0,result
+
+ with appropriate optimizations if R0,R16,R17 are the registers
+ specified by the compiler. */
+
+ int xr, yr, rr;
+ symbolS *sym;
+ expressionS newtok[3];
+
+ xr = regno (tok[0].X_add_number);
+ yr = regno (tok[1].X_add_number);
+
+ if (ntok < 3)
+ rr = xr;
+ else
+ rr = regno (tok[2].X_add_number);
+
+ /* Move the operands into the right place. */
+ if (yr == AXP_REG_R16 && xr == AXP_REG_R17)
+ {
+ /* They are in exactly the wrong order -- swap through AT. */
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+
+ set_tok_reg (newtok[0], AXP_REG_R16);
+ set_tok_reg (newtok[1], AXP_REG_AT);
+ assemble_tokens ("mov", newtok, 2, 1);
+
+ set_tok_reg (newtok[0], AXP_REG_R17);
+ set_tok_reg (newtok[1], AXP_REG_R16);
+ assemble_tokens ("mov", newtok, 2, 1);
+
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ set_tok_reg (newtok[1], AXP_REG_R17);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+ else
+ {
+ if (yr == AXP_REG_R16)
+ {
+ set_tok_reg (newtok[0], AXP_REG_R16);
+ set_tok_reg (newtok[1], AXP_REG_R17);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+
+ if (xr != AXP_REG_R16)
+ {
+ set_tok_reg (newtok[0], xr);
+ set_tok_reg (newtok[1], AXP_REG_R16);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+
+ if (yr != AXP_REG_R16 && yr != AXP_REG_R17)
+ {
+ set_tok_reg (newtok[0], yr);
+ set_tok_reg (newtok[1], AXP_REG_R17);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+ }
+
+ sym = symbol_find_or_make ((const char *) symname);
+
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ set_tok_sym (newtok[1], sym, 0);
+ assemble_tokens ("ldi", newtok, 2, 1);
+
+ /* Call the division routine. */
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ set_tok_cpreg (newtok[1], AXP_REG_AT);
+ set_tok_const (newtok[2], 0);
+ assemble_tokens ("call", newtok, 3, 1);
+
+ /* Move the result to the right place. */
+ if (rr != AXP_REG_R0)
+ {
+ set_tok_reg (newtok[0], AXP_REG_R0);
+ set_tok_reg (newtok[1], rr);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+ }
+ else
+ {
+ if (strcmp (symname, "__divw") == 0)
+ assemble_tokens ("divw", tok, 3, 0);
+ if (strcmp (symname, "__divl") == 0)
+ assemble_tokens ("divl", tok, 3, 0);
+ if (strcmp (symname, "__remw") == 0)
+ assemble_tokens ("remw", tok, 3, 0);
+ if (strcmp (symname, "__reml") == 0)
+ assemble_tokens ("reml", tok, 3, 0);
+ }
+}
+
+#else /* !OBJ_EVAX */
+
+static void
+emit_division (const expressionS *tok, int ntok, const void *symname)
+{
+ if (strcmp (sw_64_target_name, "sw6a") == 0
+ || strcmp (sw_64_target_name, "sw6b") == 0)
+ {
+ /* DIVISION and MODULUS. Yech.
+ Convert
+ OP x,y,result
+ to
+ ldi pv,__OP
+ mov x,t10
+ mov y,t11
+ call t9,(pv),__OP
+ mov t12,result
+
+ with appropriate optimizations if t10,t11,t12 are the registers
+ specified by the compiler. */
+
+ int xr, yr, rr;
+ symbolS *sym;
+ expressionS newtok[3];
+
+ xr = regno (tok[0].X_add_number);
+ yr = regno (tok[1].X_add_number);
+
+ if (ntok < 3)
+ rr = xr;
+ else
+ rr = regno (tok[2].X_add_number);
+
+ sym = symbol_find_or_make ((const char *) symname);
+
+ /* Move the operands into the right place. */
+ if (yr == AXP_REG_T10 && xr == AXP_REG_T11)
+ {
+ /* They are in exactly the wrong order -- swap through AT. */
+ if (sw_64_noat_on)
+ as_bad (_ ("macro requires $at register while noat in effect"));
+
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_reg (newtok[1], AXP_REG_AT);
+ assemble_tokens ("mov", newtok, 2, 1);
+
+ set_tok_reg (newtok[0], AXP_REG_T11);
+ set_tok_reg (newtok[1], AXP_REG_T10);
+ assemble_tokens ("mov", newtok, 2, 1);
+
+ set_tok_reg (newtok[0], AXP_REG_AT);
+ set_tok_reg (newtok[1], AXP_REG_T11);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+ else
+ {
+ if (yr == AXP_REG_T10)
+ {
+ set_tok_reg (newtok[0], AXP_REG_T10);
+ set_tok_reg (newtok[1], AXP_REG_T11);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+
+ if (xr != AXP_REG_T10)
+ {
+ set_tok_reg (newtok[0], xr);
+ set_tok_reg (newtok[1], AXP_REG_T10);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+
+ if (yr != AXP_REG_T10 && yr != AXP_REG_T11)
+ {
+ set_tok_reg (newtok[0], yr);
+ set_tok_reg (newtok[1], AXP_REG_T11);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+ }
+
+ /* Call the division routine. */
+ set_tok_reg (newtok[0], AXP_REG_T9);
+ set_tok_sym (newtok[1], sym, 0);
+ assemble_tokens ("call", newtok, 2, 1);
+
+ /* Reload the GP register. */
+#ifdef OBJ_AOUT
+ FIXME
+#endif
+#if defined(OBJ_ECOFF) || defined(OBJ_ELF)
+ set_tok_reg (newtok[0], sw_64_gp_register);
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], AXP_REG_T9);
+ assemble_tokens ("ldgp", newtok, 3, 1);
+#endif
+
+ /* Move the result to the right place. */
+ if (rr != AXP_REG_T12)
+ {
+ set_tok_reg (newtok[0], AXP_REG_T12);
+ set_tok_reg (newtok[1], rr);
+ assemble_tokens ("mov", newtok, 2, 1);
+ }
+ }
+ else
+ {
+ if (strcmp (symname, "__divw") == 0)
+ assemble_tokens ("divw", tok, 3, 0);
+ if (strcmp (symname, "__divl") == 0)
+ assemble_tokens ("divl", tok, 3, 0);
+ if (strcmp (symname, "__remw") == 0)
+ assemble_tokens ("remw", tok, 3, 0);
+ if (strcmp (symname, "__reml") == 0)
+ assemble_tokens ("reml", tok, 3, 0);
+ }
+}
+
+#endif /* !OBJ_EVAX */
+
+/* The call and jmp macros differ from their instruction counterparts
+ in that they can load the target address and default most
+ everything. */
+
+static void
+emit_jsrjmp (const expressionS *tok, int ntok, const void *vopname)
+{
+ const char *opname = (const char *) vopname;
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ int r, tokidx = 0;
+ long lituse = 0;
+
+ if (tokidx < ntok && tok[tokidx].X_op == O_register)
+ r = regno (tok[tokidx++].X_add_number);
+ else
+ r = strcmp (opname, "jmp") == 0 ? AXP_REG_ZERO : AXP_REG_RA;
+
+ set_tok_reg (newtok[0], r);
+
+ if (tokidx < ntok
+ && (tok[tokidx].X_op == O_pregister || tok[tokidx].X_op == O_cpregister))
+ r = regno (tok[tokidx++].X_add_number);
+#ifdef OBJ_EVAX
+ /* Keep register if call $n.<sym>. */
+#else
+ else
+ {
+ int basereg = sw_64_gp_register;
+ lituse = load_expression (r = AXP_REG_PV, &tok[tokidx], &basereg, NULL,
+ opname);
+ }
+#endif
+
+ set_tok_cpreg (newtok[1], r);
+
+#ifndef OBJ_EVAX
+ if (tokidx < ntok)
+ newtok[2] = tok[tokidx];
+ else
+#endif
+ set_tok_const (newtok[2], 0);
+
+ assemble_tokens_to_insn (opname, newtok, 3, &insn);
+
+ if (lituse)
+ {
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ insn.fixups[insn.nfixups].reloc = DUMMY_RELOC_LITUSE_JSR;
+ insn.fixups[insn.nfixups].exp.X_op = O_absent;
+ insn.nfixups++;
+ insn.sequence = lituse;
+ }
+
+#ifdef OBJ_EVAX
+ if (sw_64_flag_replace && r == AXP_REG_RA && tok[tokidx].X_add_symbol
+ && sw_64_linkage_symbol)
+ {
+ /* Create a BOH reloc for 'call $27,NAME'. */
+ const char *symname = S_GET_NAME (tok[tokidx].X_add_symbol);
+ int symlen = strlen (symname);
+ char *ensymname;
+
+ /* Build the entry name as 'NAME..en'. */
+ ensymname = XNEWVEC (char, symlen + 5);
+ memcpy (ensymname, symname, symlen);
+ memcpy (ensymname + symlen, "..en", 5);
+
+ gas_assert (insn.nfixups < MAX_INSN_FIXUPS);
+ if (insn.nfixups > 0)
+ {
+ memmove (&insn.fixups[1], &insn.fixups[0],
+ sizeof (struct sw_64_fixup) * insn.nfixups);
+ }
+
+ /* The fixup must be the same as the BFD_RELOC_SW_64_NOP
+ case in load_expression. See B.4.5.2 of the OpenVMS
+ Linker Utility Manual. */
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_BOH;
+ insn.fixups[0].exp.X_op = O_symbol;
+ insn.fixups[0].exp.X_add_symbol = symbol_find_or_make (ensymname);
+ insn.fixups[0].exp.X_add_number = 0;
+ insn.fixups[0].xtrasym = sw_64_linkage_symbol;
+ insn.fixups[0].procsym = sw_64_evax_proc->symbol;
+ insn.nfixups++;
+ sw_64_linkage_symbol = 0;
+ free (ensymname);
+ }
+#endif
+
+ emit_insn (&insn);
+}
+
+/* The ret and jcr instructions differ from their instruction
+ counterparts in that everything can be defaulted. */
+
+static void
+emit_retjcr (const expressionS *tok, int ntok, const void *vopname)
+{
+ const char *opname = (const char *) vopname;
+ expressionS newtok[3];
+ int r, tokidx = 0;
+
+ if (tokidx < ntok && tok[tokidx].X_op == O_register)
+ r = regno (tok[tokidx++].X_add_number);
+ else
+ r = AXP_REG_ZERO;
+
+ set_tok_reg (newtok[0], r);
+
+ if (tokidx < ntok
+ && (tok[tokidx].X_op == O_pregister || tok[tokidx].X_op == O_cpregister))
+ r = regno (tok[tokidx++].X_add_number);
+ else
+ r = AXP_REG_RA;
+
+ set_tok_cpreg (newtok[1], r);
+
+ if (tokidx < ntok)
+ newtok[2] = tok[tokidx];
+ else
+ set_tok_const (newtok[2], strcmp (opname, "ret") == 0);
+
+ assemble_tokens (opname, newtok, 3, 0);
+}
+
+/* Implement the ldgp macro. */
+
+static void
+emit_ldgp (const expressionS *tok ATTRIBUTE_UNUSED, int ntok ATTRIBUTE_UNUSED,
+ const void *unused ATTRIBUTE_UNUSED)
+{
+#ifdef OBJ_AOUT
+ FIXME
+#endif
+#if defined (OBJ_ECOFF) || defined (OBJ_ELF)
+ /* from "ldgp r1,n (r2)", generate "ldih r1,X (R2); ldi r1,Y (r1)"
+ with appropriate constants and relocations. */
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ expressionS addend;
+
+#ifdef OBJ_ECOFF
+ if (regno (tok[2].X_add_number) == AXP_REG_PV)
+ ecoff_set_gp_prolog_size (0);
+#endif
+
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ newtok[2] = tok[2];
+
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+
+ addend = tok[1];
+
+#ifdef OBJ_ECOFF
+ if (addend.X_op != O_constant)
+ as_bad (_ ("can not resolve expression"));
+ addend.X_op = O_symbol;
+ addend.X_add_symbol = sw_64_gp_symbol;
+#endif
+
+ insn.nfixups = 1;
+ insn.fixups[0].exp = addend;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_GPDISP_HI16;
+ insn.sequence = next_sequence_num;
+
+ emit_insn (&insn);
+
+ set_tok_preg (newtok[2], tok[0].X_add_number);
+
+ assemble_tokens_to_insn ("ldi", newtok, 3, &insn);
+
+#ifdef OBJ_ECOFF
+ addend.X_add_number += 4;
+#endif
+
+ insn.nfixups = 1;
+ insn.fixups[0].exp = addend;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_GPDISP_LO16;
+ insn.sequence = next_sequence_num--;
+
+ emit_insn (&insn);
+#endif /* OBJ_ECOFF || OBJ_ELF */
+}
+
+/* The macro table. */
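+/* Each entry lists one or more argument patterns, each terminated by
+   MACRO_EOA; find_macro_match below tries them in order until one
+   matches the tokens actually supplied.  */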
+
+static const struct sw_64_macro sw_64_macros[] =
+{
+ { "vlog", emit_vlogx, NULL,
+ { MACRO_FPR, MACRO_FPR, MACRO_FPR, MACRO_FPR, MACRO_EOA } },
+/* Load/Store macros. */
+ { "ldi", emit_ldi, NULL,
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldih", emit_ldih, NULL,
+ { MACRO_IR, MACRO_EXP, MACRO_EOA } },
+
+ { "ldw", emit_ir_load, "ldw",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldl", emit_ir_load, "ldl",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldl_u", emit_ir_load, "ldl_u",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldw_inc", emit_ir_load, "ldw_inc",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldl_inc", emit_ir_load, "ldl_inc",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldw_dec", emit_ir_load, "ldw_dec",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldl_dec", emit_ir_load, "ldl_dec",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldw_set", emit_ir_load, "ldw_set",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldl_set", emit_ir_load, "ldl_set",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "flds", emit_loadstore, "flds",
+ { MACRO_FPR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "fldd", emit_loadstore, "fldd",
+ { MACRO_FPR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+
+ { "ldb", emit_ldX, (void *) 0,
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "ldh", emit_ldX, (void *) 1,
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+
+ { "ldgp", emit_ldgp, NULL,
+ { MACRO_IR, MACRO_EXP, MACRO_PIR, MACRO_EOA } },
+ { "stw", emit_loadstore, "stw",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "stl", emit_loadstore, "stl",
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "fsts", emit_loadstore, "fsts",
+ { MACRO_FPR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "fstd", emit_loadstore, "fstd",
+ { MACRO_FPR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+ { "stb", emit_stX, (void *) 0,
+ { MACRO_IR, MACRO_EXP, MACRO_OPIR, MACRO_EOA } },
+
+/* Arithmetic macros. */
+ { "sextb", emit_sextX, (void *) 0,
+ { MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EOA,
+ /* MACRO_EXP, MACRO_IR, MACRO_EOA */ } },
+ { "sexth", emit_sextX, (void *) 1,
+ { MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EOA,
+ /* MACRO_EXP, MACRO_IR, MACRO_EOA */ } },
+
+ { "divw", emit_division, "__divw",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "divwu", emit_division, "__divwu",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "divl", emit_division, "__divl",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "divlu", emit_division, "__divlu",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "remw", emit_division, "__remw",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "remwu", emit_division, "__remwu",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "reml", emit_division, "__reml",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+ { "remlu", emit_division, "__remlu",
+ { MACRO_IR, MACRO_IR, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_IR, MACRO_EOA,
+ /* MACRO_IR, MACRO_EXP, MACRO_IR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA */ } },
+
+ { "call", emit_jsrjmp, "call",
+ { MACRO_PIR, MACRO_EXP, MACRO_EOA,
+ MACRO_PIR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA,
+ MACRO_EXP, MACRO_EOA } },
+ { "jmp", emit_jsrjmp, "jmp",
+ { MACRO_PIR, MACRO_EXP, MACRO_EOA,
+ MACRO_PIR, MACRO_EOA,
+ MACRO_IR, MACRO_EXP, MACRO_EOA,
+ MACRO_EXP, MACRO_EOA } },
+ { "ret", emit_retjcr, "ret",
+ { MACRO_IR, MACRO_EXP, MACRO_EOA,
+ MACRO_IR, MACRO_EOA,
+ MACRO_PIR, MACRO_EXP, MACRO_EOA,
+ MACRO_PIR, MACRO_EOA,
+ MACRO_EXP, MACRO_EOA,
+ MACRO_EOA } },
+};
+
+static const unsigned int sw_64_num_macros
+ = sizeof (sw_64_macros) / sizeof (*sw_64_macros);
+
+/* Search forward through all variants of a macro looking for a syntax
+ match. */
+
+static const struct sw_64_macro *
+find_macro_match (const struct sw_64_macro *first_macro, const expressionS *tok,
+ int *pntok)
+
+{
+ const struct sw_64_macro *macro = first_macro;
+ int ntok = *pntok;
+
+ do
+ {
+ const enum sw_64_macro_arg *arg = macro->argsets;
+ int tokidx = 0;
+
+ while (*arg)
+ {
+ switch (*arg)
+ {
+ case MACRO_EOA:
+ if (tokidx == ntok)
+ return macro;
+ else
+ tokidx = 0;
+ break;
+
+ /* Index register. */
+ case MACRO_IR:
+ if (tokidx >= ntok || tok[tokidx].X_op != O_register
+ || !is_ir_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ ++tokidx;
+ break;
+
+ /* Parenthesized index register. */
+ case MACRO_PIR:
+ if (tokidx >= ntok || tok[tokidx].X_op != O_pregister
+ || !is_ir_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ ++tokidx;
+ break;
+
+ /* Optional parenthesized index register. */
+ case MACRO_OPIR:
+ if (tokidx < ntok && tok[tokidx].X_op == O_pregister
+ && is_ir_num (tok[tokidx].X_add_number))
+ ++tokidx;
+ break;
+
+ /* Leading comma with a parenthesized index register. */
+ case MACRO_CPIR:
+ if (tokidx >= ntok || tok[tokidx].X_op != O_cpregister
+ || !is_ir_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ ++tokidx;
+ break;
+
+ /* Floating point register. */
+ case MACRO_FPR:
+ if (tokidx >= ntok || tok[tokidx].X_op != O_register
+ || !is_fpr_num (tok[tokidx].X_add_number))
+ goto match_failed;
+ ++tokidx;
+ break;
+
+ /* Normal expression. */
+ case MACRO_EXP:
+ if (tokidx >= ntok)
+ goto match_failed;
+ switch (tok[tokidx].X_op)
+ {
+ case O_illegal:
+ case O_absent:
+ case O_register:
+ case O_pregister:
+ case O_cpregister:
+ case O_literal:
+ case O_lituse_base:
+ case O_lituse_bytoff:
+ case O_lituse_jsr:
+ case O_gpdisp:
+ case O_gprelhigh:
+ case O_gprellow:
+ case O_gprel:
+ case O_samegp:
+ goto match_failed;
+
+ default:
+ break;
+ }
+ ++tokidx;
+ break;
+
+ match_failed:
+ while (*arg != MACRO_EOA)
+ ++arg;
+ tokidx = 0;
+ break;
+ }
+ ++arg;
+ }
+ } while (++macro - sw_64_macros < (int) sw_64_num_macros
+ && !strcmp (macro->name, first_macro->name));
+
+ return NULL;
+}
+
+/* Given an opcode name and a pre-tokenized set of arguments, take the
+ opcode all the way through emission. */
+static void
+assemble_tokens (const char *opname, expressionS *tok, int ntok,
+ int local_macros_on)
+{
+ int found_something = 0;
+ const struct sw_64_opcode *opcode;
+ const struct sw_64_macro *macro;
+ int cpumatch = 1;
+ extended_bfd_reloc_code_real_type reloc = BFD_RELOC_UNUSED;
+
+#ifdef RELOC_OP_P
+ /* If a user-specified relocation is present, this is not a macro. */
+ if (ntok && USER_RELOC_P (tok[ntok - 1].X_op))
+ {
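+ /* When sw_64_literalgot_on (or one of the sw_64_tlsrelgot_* flags) is
+ set, first emit an "ldih rX, 0 (gp)" carrying the matching GOT-style
+ relocation and sequence number, then rewrite the original instruction
+ to use rX as its base register. !gprel16 is similarly split into a
+ !gprelhi "ldih" plus a !gprello low part. */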
+ if (sw_64_literalgot_on)
+ {
+ if (tok[ntok - 1].X_op == O_literal)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_ELF_LITERAL_GOT;
+ insn.sequence = tok[ntok - 1].X_add_number;
+ emit_insn (&insn);
+ tok[2].X_add_number = tok[0].X_add_number;
+ }
+ }
+ if (sw_64_tlsrelgot_gottprel_on)
+ {
+ if (tok[ntok - 1].X_op == O_gottprel)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_TLSREL_GOT;
+ insn.sequence = tok[ntok - 1].X_add_number;
+ emit_insn (&insn);
+ tok[2].X_add_number = tok[0].X_add_number;
+ }
+ }
+
+ if (sw_64_tlsrelgot_gotdtprel_on)
+ {
+ if (tok[ntok - 1].X_op == O_gotdtprel)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_TLSREL_GOT;
+ insn.sequence = tok[ntok - 1].X_add_number;
+ emit_insn (&insn);
+ tok[2].X_add_number = tok[0].X_add_number;
+ }
+ }
+
+ if (sw_64_tlsrelgot_tlsgd_on)
+ {
+ if (tok[ntok - 1].X_op == O_tlsgd)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_TLSREL_GOT;
+ insn.sequence = tok[ntok - 1].X_add_number;
+ emit_insn (&insn);
+ tok[2].X_add_number = tok[0].X_add_number;
+ }
+ }
+
+ if (sw_64_tlsrelgot_tlsldm_on)
+ {
+ if (tok[ntok - 1].X_op == O_tlsldm)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+ newtok[0] = tok[0];
+ set_tok_const (newtok[1], 0);
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_TLSREL_GOT;
+ insn.sequence = tok[ntok - 1].X_add_number;
+ emit_insn (&insn);
+ tok[2].X_add_number = tok[0].X_add_number;
+ }
+ }
+
+ if (sw_64_gprel16_on)
+ {
+ if (tok[ntok - 1].X_op == O_gprel)
+ {
+ if (strncmp (opname, "ldi", strlen ("ldi")) == 0
+ || strncmp (opname, "ldw", strlen ("ldw")) == 0
+ || strncmp (opname, "ldl", strlen ("ldl")) == 0)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+
+ newtok[0] = tok[0];
+ newtok[1] = tok[1];
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_GPREL_HI16;
+ emit_insn (&insn);
+
+ tok[2].X_add_number = tok[0].X_add_number;
+ tok[ntok - 1].X_op = O_gprellow;
+ }
+ else if (strncmp (opname, "stw", strlen ("stw")) == 0
+ || strncmp (opname, "stl", strlen ("stl")) == 0
+ || strncmp (opname, "flds", strlen ("flds")) == 0
+ || strncmp (opname, "fldd", strlen ("fldd")) == 0
+ || strncmp (opname, "fsts", strlen ("fsts")) == 0
+ || strncmp (opname, "fstd", strlen ("fstd")) == 0)
+ {
+ struct sw_64_insn insn;
+ expressionS newtok[3];
+
+ if (sw_64_noat_on)
+ as_warn (_ (
+ "!gprelhi requires $at register while noat in effect"));
+ else
+ as_warn (
+ _ ("assembler requires $28 register for the !gprelhi !"));
+
+ set_tok_reg (newtok[0], 28);
+ newtok[1] = tok[1];
+ set_tok_preg (newtok[2], sw_64_gp_register);
+ assemble_tokens_to_insn ("ldih", newtok, 3, &insn);
+ insn.nfixups = 1;
+ insn.fixups[0].reloc = BFD_RELOC_SW_64_GPREL_HI16;
+ emit_insn (&insn);
+
+ tok[2].X_add_number = 28;
+ tok[ntok - 1].X_op = O_gprellow;
+ }
+ else
+ {
+ as_warn (_ ("!gprel16(change into gprelhi+gprello) donot "
+ "support this memory access instruction(%s).\n"),
+ opname);
+ }
+ }
+ }
+
+ reloc = SW_64_RELOC_TABLE (tok[ntok - 1].X_op)->reloc;
+ ntok--;
+ }
+ else
+#endif
+ if (local_macros_on)
+ {
+ macro = ((const struct sw_64_macro *) str_hash_find (sw_64_macro_hash,
+ opname));
+ if (macro)
+ {
+ found_something = 1;
+ macro = find_macro_match (macro, tok, &ntok);
+ if (macro)
+ {
+ (*macro->emit) (tok, ntok, macro->arg);
+ return;
+ }
+ }
+ }
+
+ if (memcmp (opname, "vlog", 4) == 0)
+ {
+ unsigned long value;
+ unsigned long length = strchr (opname, 'g') - opname + 1;
+ value = strtol (opname + length, NULL, 16);
+ if (opname[length + 2] != '\0')
+ as_bad (_ ("%s, wrong truth number!!"), opname);
+ else if (value > 0xFF)
+ as_bad (_ ("%s, wrong truth number!!"), opname);
+ else if (value == 0
+ && (opname[length] != '0' || opname[length + 1] != '0'))
+ as_bad (_ ("%s, wrong truth number!!"), opname);
+ macro = ((const struct sw_64_macro *) str_hash_find (sw_64_macro_hash,
+ "vlog"));
+ if (macro)
+ {
+ found_something = 1;
+ macro = find_macro_match (macro, tok, &ntok);
+ if (macro)
+ {
+ expressionS newtok[5];
+ newtok[0] = tok[0];
+ newtok[1] = tok[1];
+ newtok[2] = tok[2];
+ newtok[3] = tok[3];
+ set_tok_const (newtok[4], value);
+ (*macro->emit) (newtok, ntok + 1, macro->arg);
+ return;
+ }
+ }
+ }
+
+ /* Search opcodes. */
+ opcode
+ = (const struct sw_64_opcode *) str_hash_find (sw_64_opcode_hash, opname);
+ if (opcode)
+ {
+ found_something = 1;
+ opcode = find_opcode_match (opcode, tok, &ntok, &cpumatch);
+ if (opcode)
+ {
+ struct sw_64_insn insn;
+ assemble_insn (opcode, tok, ntok, &insn, reloc);
+
+ /* Copy the sequence number for the reloc from the reloc token. */
+ if (reloc != BFD_RELOC_UNUSED)
+ insn.sequence = tok[ntok].X_add_number;
+
+ emit_insn (&insn);
+ return;
+ }
+ }
+
+ if (found_something)
+ {
+ if (cpumatch)
+ as_bad (_ ("inappropriate arguments for opcode `%s'"), opname);
+ else
+ as_bad (_ ("opcode `%s' not supported for target %s"), opname,
+ sw_64_target_name);
+ }
+ else
+ as_bad (_ ("unknown opcode `%s'"), opname);
+}
+
+#ifdef OBJ_EVAX
+
+/* Add sym+addend to link pool.
+ Return offset from current procedure value (pv) to entry in link pool.
+
+ Add new fixup only if offset isn't 16bit. */
+
+static symbolS *
+add_to_link_pool (symbolS *sym, offsetT addend)
+{
+ symbolS *basesym;
+ segT current_section = now_seg;
+ int current_subsec = now_subseg;
+ char *p;
+ segment_info_type *seginfo = seg_info (sw_64_link_section);
+ fixS *fixp;
+ symbolS *linksym, *expsym;
+ expressionS e;
+
+ basesym = sw_64_evax_proc->symbol;
+
+ /* @@ This assumes all entries in a given section will be of the same
+ size... Probably correct, but unwise to rely on. */
+ /* This must always be called with the same subsegment. */
+
+ if (seginfo->frchainP)
+ for (fixp = seginfo->frchainP->fix_root; fixp != (fixS *) NULL;
+ fixp = fixp->fx_next)
+ {
+ if (fixp->fx_addsy == sym && fixp->fx_offset == (valueT) addend
+ && fixp->tc_fix_data.info && fixp->tc_fix_data.info->sym
+ && symbol_symbolS (fixp->tc_fix_data.info->sym)
+ && (symbol_get_value_expression (fixp->tc_fix_data.info->sym)
+ ->X_op_symbol
+ == basesym))
+ return fixp->tc_fix_data.info->sym;
+ }
+
+ /* Not found, add a new entry. */
+ subseg_set (sw_64_link_section, 0);
+ linksym = symbol_new (FAKE_LABEL_NAME, now_seg, frag_now, frag_now_fix ());
+ p = frag_more (8);
+ memset (p, 0, 8);
+
+ /* Create a symbol for 'basesym - linksym' (offset of the added entry). */
+ e.X_op = O_subtract;
+ e.X_add_symbol = linksym;
+ e.X_op_symbol = basesym;
+ e.X_add_number = 0;
+ expsym = make_expr_symbol (&e);
+
+ /* Create a fixup for the entry. */
+ fixp = fix_new (frag_now, p - frag_now->fr_literal, 8, sym, addend, 0,
+ BFD_RELOC_64);
+ fixp->tc_fix_data.info = get_sw_64_reloc_tag (next_sequence_num--);
+ fixp->tc_fix_data.info->sym = expsym;
+
+ subseg_set (current_section, current_subsec);
+
+ /* Return the symbol. */
+ return expsym;
+}
+#endif /* OBJ_EVAX */
+
+/* Assembler directives. */
+
+/* Handle the .text pseudo-op. This is like the usual one, but it
+ clears sw_64_insn_label and restores auto alignment. */
+
+static void
+s_sw_64_text (int i)
+{
+#ifdef OBJ_ELF
+ obj_elf_text (i);
+#else
+ s_text (i);
+#endif
+#ifdef OBJ_EVAX
+ {
+ symbolS *symbolP;
+
+ symbolP = symbol_find (".text");
+ if (symbolP == NULL)
+ {
+ symbolP = symbol_make (".text");
+ S_SET_SEGMENT (symbolP, text_section);
+ symbol_table_insert (symbolP);
+ }
+ }
+#endif
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+
+/* Handle the .data pseudo-op. This is like the usual one, but it
+ clears sw_64_insn_label and restores auto alignment. */
+
+static void
+s_sw_64_data (int i)
+{
+#ifdef OBJ_ELF
+ obj_elf_data (i);
+#else
+ s_data (i);
+#endif
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+
+#if defined (OBJ_ECOFF) || defined (OBJ_EVAX)
+
+/* Handle the OSF/1 and openVMS .comm pseudo quirks. */
+
+static void
+s_sw_64_comm (int ignore ATTRIBUTE_UNUSED)
+{
+ char *name;
+ char c;
+ char *p;
+ offsetT size;
+ symbolS *symbolP;
+#ifdef OBJ_EVAX
+ offsetT temp;
+ int log_align = 0;
+#endif
+
+ c = get_symbol_name (&name);
+
+ /* Just after name is now '\0'. */
+ p = input_line_pointer;
+ *p = c;
+
+ SKIP_WHITESPACE_AFTER_NAME ();
+
+ /* Sw_64 OSF/1 compiler doesn't provide the comma, gcc does. */
+ if (*input_line_pointer == ',')
+ {
+ input_line_pointer++;
+ SKIP_WHITESPACE ();
+ }
+ if ((size = get_absolute_expression ()) < 0)
+ {
+ as_warn (_ (".COMMon length (%ld.) <0! Ignored."), (long) size);
+ ignore_rest_of_line ();
+ return;
+ }
+
+ *p = 0;
+ symbolP = symbol_find_or_make (name);
+ *p = c;
+
+ if (S_IS_DEFINED (symbolP) && !S_IS_COMMON (symbolP))
+ {
+ as_bad (_ ("Ignoring attempt to re-define symbol"));
+ ignore_rest_of_line ();
+ return;
+ }
+
+#ifdef OBJ_EVAX
+ if (*input_line_pointer != ',')
+ temp = 8; /* Default alignment. */
+ else
+ {
+ input_line_pointer++;
+ SKIP_WHITESPACE ();
+ temp = get_absolute_expression ();
+ }
+
+ /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
+ while ((temp >>= 1) != 0)
+ ++log_align;
+
+ if (*input_line_pointer == ',')
+ {
+ /* Extended form of the directive
+
+ .comm symbol, size, alignment, section
+
+ where the "common" semantics is transferred to the section.
+ The symbol is effectively an alias for the section name. */
+
+ segT sec;
+ const char *sec_name;
+ symbolS *sec_symbol;
+ segT current_seg = now_seg;
+ subsegT current_subseg = now_subseg;
+ int cur_size;
+
+ input_line_pointer++;
+ SKIP_WHITESPACE ();
+ sec_name = s_sw_64_section_name ();
+ sec_symbol = symbol_find_or_make (sec_name);
+ sec = subseg_new (sec_name, 0);
+ S_SET_SEGMENT (sec_symbol, sec);
+ symbol_get_bfdsym (sec_symbol)->flags |= BSF_SECTION_SYM;
+ bfd_vms_set_section_flags (stdoutput, sec, 0,
+ EGPS__V_OVR | EGPS__V_GBL | EGPS__V_NOMOD);
+ record_alignment (sec, log_align);
+
+ /* Reuse stab_string_size to store the size of the section. */
+ cur_size = seg_info (sec)->stabu.stab_string_size;
+ if ((int) size > cur_size)
+ {
+ char *pfrag = frag_var (rs_fill, 1, 1, (relax_substateT) 0, NULL,
+ (valueT) size - (valueT) cur_size, NULL);
+ *pfrag = 0;
+ seg_info (sec)->stabu.stab_string_size = (int) size;
+ }
+
+ S_SET_SEGMENT (symbolP, sec);
+
+ subseg_set (current_seg, current_subseg);
+ }
+ else
+ {
+ /* Regular form of the directive
+
+ .comm symbol, size, alignment
+
+ where the "common" semantics in on the symbol.
+ These symbols are assembled in the .bss section. */
+
+ char *pfrag;
+ segT current_seg = now_seg;
+ subsegT current_subseg = now_subseg;
+
+ subseg_set (bss_section, 1);
+ frag_align (log_align, 0, 0);
+ record_alignment (bss_section, log_align);
+
+ symbol_set_frag (symbolP, frag_now);
+ pfrag = frag_var (rs_org, 1, 1, (relax_substateT) 0, symbolP, size, NULL);
+ *pfrag = 0;
+
+ S_SET_SEGMENT (symbolP, bss_section);
+
+ subseg_set (current_seg, current_subseg);
+ }
+#endif
+
+ if (S_GET_VALUE (symbolP))
+ {
+ if (S_GET_VALUE (symbolP) != (valueT) size)
+ as_bad (_ (
+ "Length of .comm \"%s\" is already %ld. Not changed to %ld."),
+ S_GET_NAME (symbolP), (long) S_GET_VALUE (symbolP),
+ (long) size);
+ }
+ else
+ {
+#ifndef OBJ_EVAX
+ S_SET_VALUE (symbolP, (valueT) size);
+#endif
+ S_SET_EXTERNAL (symbolP);
+ }
+
+#ifndef OBJ_EVAX
+ know (symbol_get_frag (symbolP) == &zero_address_frag);
+#endif
+ demand_empty_rest_of_line ();
+}
+
+#endif /* ! OBJ_ELF */
+
+#ifdef OBJ_ECOFF
+
+/* Handle the .rdata pseudo-op. This is like the usual one, but it
+ clears sw_64_insn_label and restores auto alignment. */
+
+static void
+s_sw_64_rdata (int ignore ATTRIBUTE_UNUSED)
+{
+ get_absolute_expression ();
+ subseg_new (".rdata", 0);
+ demand_empty_rest_of_line ();
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+
+#endif
+
+#ifdef OBJ_ECOFF
+
+/* Handle the .sdata pseudo-op. This is like the usual one, but it
+ clears sw_64_insn_label and restores auto alignment. */
+
+static void
+s_sw_64_sdata (int ignore ATTRIBUTE_UNUSED)
+{
+ get_absolute_expression ();
+ subseg_new (".sdata", 0);
+ demand_empty_rest_of_line ();
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+#endif
+
+#ifdef OBJ_ELF
+struct sw_64_elf_frame_data
+{
+ symbolS *func_sym;
+ symbolS *func_end_sym;
+ symbolS *prologue_sym;
+ unsigned int mask;
+ unsigned int fmask;
+ int fp_regno;
+ int ra_regno;
+ offsetT frame_size;
+ offsetT mask_offset;
+ offsetT fmask_offset;
+
+ struct sw_64_elf_frame_data *next;
+};
+
+static struct sw_64_elf_frame_data *all_frame_data;
+static struct sw_64_elf_frame_data **plast_frame_data = &all_frame_data;
+static struct sw_64_elf_frame_data *cur_frame_data;
+
+extern int all_cfi_sections;
+
+/* Handle the .section pseudo-op. This is like the usual one, but it
+ clears sw_64_insn_label and restores auto alignment. */
+
+static void
+s_sw_64_section (int ignore ATTRIBUTE_UNUSED)
+{
+ obj_elf_section (ignore);
+
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+
+static void
+s_sw_64_ent (int dummy ATTRIBUTE_UNUSED)
+{
+ if (ECOFF_DEBUGGING)
+ ecoff_directive_ent (0);
+ else
+ {
+ char *name, name_end;
+
+ name_end = get_symbol_name (&name);
+ /* CFI_EMIT_eh_frame is the default. */
+ all_cfi_sections = CFI_EMIT_eh_frame;
+
+ if (!is_name_beginner (*name))
+ {
+ as_warn (_ (".ent directive has no name"));
+ (void) restore_line_pointer (name_end);
+ }
+ else
+ {
+ symbolS *sym;
+
+ if (cur_frame_data)
+ as_warn (_ ("nested .ent directives"));
+
+ sym = symbol_find_or_make (name);
+ symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
+
+ cur_frame_data = XCNEW (struct sw_64_elf_frame_data);
+ cur_frame_data->func_sym = sym;
+
+ /* Provide sensible defaults. */
+ cur_frame_data->fp_regno = 30; /* sp */
+ cur_frame_data->ra_regno = 26; /* ra */
+
+ *plast_frame_data = cur_frame_data;
+ plast_frame_data = &cur_frame_data->next;
+
+ /* The .ent directive is sometimes followed by a number. Not sure
+ what it really means, but ignore it. */
+ *input_line_pointer = name_end;
+ SKIP_WHITESPACE_AFTER_NAME ();
+ if (*input_line_pointer == ',')
+ {
+ input_line_pointer++;
+ SKIP_WHITESPACE ();
+ }
+ if (ISDIGIT (*input_line_pointer) || *input_line_pointer == '-')
+ (void) get_absolute_expression ();
+ }
+ demand_empty_rest_of_line ();
+ }
+}
+
+static void
+s_sw_64_end (int dummy ATTRIBUTE_UNUSED)
+{
+ if (ECOFF_DEBUGGING)
+ ecoff_directive_end (0);
+ else
+ {
+ char *name, name_end;
+
+ name_end = get_symbol_name (&name);
+
+ if (!is_name_beginner (*name))
+ {
+ as_warn (_ (".end directive has no name"));
+ }
+ else
+ {
+ symbolS *sym;
+
+ sym = symbol_find (name);
+ if (!cur_frame_data)
+ as_warn (_ (".end directive without matching .ent"));
+ else if (sym != cur_frame_data->func_sym)
+ as_warn (_ (".end directive names different symbol than .ent"));
+
+ /* Create an expression to calculate the size of the function. */
+ if (sym && cur_frame_data)
+ {
+ OBJ_SYMFIELD_TYPE *obj = symbol_get_obj (sym);
+ expressionS *exp = XNEW (expressionS);
+
+ obj->size = exp;
+ exp->X_op = O_subtract;
+ exp->X_add_symbol = symbol_temp_new_now ();
+ exp->X_op_symbol = sym;
+ exp->X_add_number = 0;
+
+ cur_frame_data->func_end_sym = exp->X_add_symbol;
+ }
+
+ cur_frame_data = NULL;
+ }
+
+ (void) restore_line_pointer (name_end);
+ demand_empty_rest_of_line ();
+ }
+}
+
+static void
+s_sw_64_mask (int fp)
+{
+ if (ECOFF_DEBUGGING)
+ {
+ if (fp)
+ ecoff_directive_fmask (0);
+ else
+ ecoff_directive_mask (0);
+ }
+ else
+ {
+ long val;
+ offsetT offset;
+
+ if (!cur_frame_data)
+ {
+ if (fp)
+ as_warn (_ (".fmask outside of .ent"));
+ else
+ as_warn (_ (".mask outside of .ent"));
+ discard_rest_of_line ();
+ return;
+ }
+
+ if (get_absolute_expression_and_terminator (&val) != ',')
+ {
+ if (fp)
+ as_warn (_ ("bad .fmask directive"));
+ else
+ as_warn (_ ("bad .mask directive"));
+ --input_line_pointer;
+ discard_rest_of_line ();
+ return;
+ }
+
+ offset = get_absolute_expression ();
+ demand_empty_rest_of_line ();
+
+ if (fp)
+ {
+ cur_frame_data->fmask = val;
+ cur_frame_data->fmask_offset = offset;
+ }
+ else
+ {
+ cur_frame_data->mask = val;
+ cur_frame_data->mask_offset = offset;
+ }
+ }
+}
+
+static void
+s_sw_64_frame (int dummy ATTRIBUTE_UNUSED)
+{
+ if (ECOFF_DEBUGGING)
+ ecoff_directive_frame (0);
+ else
+ {
+ long val;
+
+ if (!cur_frame_data)
+ {
+ as_warn (_ (".frame outside of .ent"));
+ discard_rest_of_line ();
+ return;
+ }
+
+ cur_frame_data->fp_regno = tc_get_register (1);
+
+ SKIP_WHITESPACE ();
+ if (*input_line_pointer++ != ','
+ || get_absolute_expression_and_terminator (&val) != ',')
+ {
+ as_warn (_ ("bad .frame directive"));
+ --input_line_pointer;
+ discard_rest_of_line ();
+ return;
+ }
+ cur_frame_data->frame_size = val;
+
+ cur_frame_data->ra_regno = tc_get_register (0);
+
+ /* Next comes the "offset of saved $a0 from $sp". In gcc terms
+ this is current_function_pretend_args_size. There's no place
+ to put this value, so ignore it. */
+ s_ignore (42);
+ }
+}
+
+static void
+s_sw_64_prologue (int ignore ATTRIBUTE_UNUSED)
+{
+ symbolS *sym;
+ int arg;
+
+ arg = get_absolute_expression ();
+ demand_empty_rest_of_line ();
+ sw_64_prologue_label
+ = symbol_new (FAKE_LABEL_NAME, now_seg, frag_now, frag_now_fix ());
+
+ if (ECOFF_DEBUGGING)
+ sym = ecoff_get_cur_proc_sym ();
+ else
+ sym = cur_frame_data ? cur_frame_data->func_sym : NULL;
+
+ if (sym == NULL)
+ {
+ as_bad (_ (".prologue directive without a preceding .ent directive"));
+ return;
+ }
+
+ switch (arg)
+ {
+ case 0: /* No PV required. */
+ S_SET_OTHER (sym, STO_SW_64_NOPV
+ | (S_GET_OTHER (sym) & ~STO_SW_64_STD_GPLOAD));
+ break;
+ case 1: /* Std GP load. */
+ S_SET_OTHER (sym, STO_SW_64_STD_GPLOAD
+ | (S_GET_OTHER (sym) & ~STO_SW_64_STD_GPLOAD));
+ break;
+ case 2: /* Non-std use of PV. */
+ break;
+
+ default:
+ as_bad (_ ("Invalid argument %d to .prologue."), arg);
+ break;
+ }
+
+ if (cur_frame_data)
+ cur_frame_data->prologue_sym = symbol_temp_new_now ();
+}
+
+static char *first_file_directive;
+
+static void
+s_sw_64_file (int ignore ATTRIBUTE_UNUSED)
+{
+ /* Save the first .file directive we see, so that we can change our
+ minds about whether ecoff debugging should or shouldn't be enabled. */
+ if (sw_64_flag_mdebug < 0 && !first_file_directive)
+ {
+ char *start = input_line_pointer;
+ size_t len;
+
+ discard_rest_of_line ();
+
+ len = input_line_pointer - start;
+ first_file_directive = xmemdup0 (start, len);
+
+ input_line_pointer = start;
+ }
+
+ if (ECOFF_DEBUGGING)
+ ecoff_directive_file (0);
+ else
+ dwarf2_directive_file (0);
+}
+
+static void
+s_sw_64_loc (int ignore ATTRIBUTE_UNUSED)
+{
+ if (ECOFF_DEBUGGING)
+ ecoff_directive_loc (0);
+ else
+ dwarf2_directive_loc (0);
+}
+
+static void
+s_sw_64_stab (int n)
+{
+ /* If we've been undecided about mdebug, make up our minds in favour. */
+ if (sw_64_flag_mdebug < 0)
+ {
+ segT sec = subseg_new (".mdebug", 0);
+ bfd_set_section_flags (sec, SEC_HAS_CONTENTS | SEC_READONLY);
+ bfd_set_section_alignment (sec, 3);
+ ecoff_read_begin_hook ();
+
+ if (first_file_directive)
+ {
+ char *save_ilp = input_line_pointer;
+ input_line_pointer = first_file_directive;
+ ecoff_directive_file (0);
+ input_line_pointer = save_ilp;
+ free (first_file_directive);
+ }
+
+ sw_64_flag_mdebug = 1;
+ }
+ s_stab (n);
+}
+
+static void
+s_sw_64_coff_wrapper (int which)
+{
+ static void (*const fns[]) (int) = {
+ ecoff_directive_begin, ecoff_directive_bend, ecoff_directive_def,
+ ecoff_directive_dim, ecoff_directive_endef, ecoff_directive_scl,
+ ecoff_directive_tag, ecoff_directive_val,
+ };
+
+ gas_assert (which >= 0 && which < (int) (sizeof (fns) / sizeof (*fns)));
+
+ if (ECOFF_DEBUGGING)
+ (*fns[which]) (0);
+ else
+ {
+ as_bad (_ ("ECOFF debugging is disabled."));
+ ignore_rest_of_line ();
+ }
+}
+
+/* Called at the end of assembly. Here we emit unwind info for frames
+ unless the compiler has done it for us. */
+
+void
+sw_64_elf_md_finish (void)
+{
+ struct sw_64_elf_frame_data *p;
+
+ if (cur_frame_data)
+ as_warn (_ (".ent directive without matching .end"));
+
+ /* If someone has generated the unwind info themselves, great. */
+ if (bfd_get_section_by_name (stdoutput, ".eh_frame") != NULL)
+ return;
+
+ /* ??? In theory we could look for functions for which we have
+ generated unwind info via CFI directives, and those we have not.
+ Those we have not could still get their unwind info from here.
+ For now, do nothing if we've seen any CFI directives. Note that
+ the above test will not trigger, as we've not emitted data yet. */
+ if (all_fde_data != NULL)
+ return;
+
+ /* Generate .eh_frame data for the unwind directives specified. */
+ for (p = all_frame_data; p; p = p->next)
+ if (p->prologue_sym)
+ {
+ /* Create a temporary symbol at the same location as our
+ function symbol. This prevents problems with globals. */
+ cfi_new_fde (symbol_temp_new (S_GET_SEGMENT (p->func_sym),
+ symbol_get_frag (p->func_sym),
+ S_GET_VALUE (p->func_sym)));
+
+ cfi_set_sections ();
+ cfi_set_return_column (p->ra_regno);
+ cfi_add_CFA_def_cfa_register (30);
+ if (p->fp_regno != 30 || p->mask || p->fmask || p->frame_size)
+ {
+ unsigned int mask;
+ offsetT offset;
+
+ cfi_add_advance_loc (p->prologue_sym);
+
+ if (p->fp_regno != 30)
+ {
+ if (p->frame_size != 0)
+ cfi_add_CFA_def_cfa (p->fp_regno, p->frame_size);
+ else
+ cfi_add_CFA_def_cfa_register (p->fp_regno);
+ }
+ else if (p->frame_size != 0)
+ cfi_add_CFA_def_cfa_offset (p->frame_size);
+
+ mask = p->mask;
+ offset = p->mask_offset;
+
+ /* Recall that $26 is special-cased and stored first. */
+ if ((mask >> 26) & 1)
+ {
+ cfi_add_CFA_offset (26, offset);
+ offset += 8;
+ mask &= ~(1 << 26);
+ }
+ while (mask)
+ {
+ unsigned int i;
+ i = mask & -mask;
+ mask ^= i;
+ i = ffs (i) - 1;
+
+ cfi_add_CFA_offset (i, offset);
+ offset += 8;
+ }
+
+ mask = p->fmask;
+ offset = p->fmask_offset;
+ while (mask)
+ {
+ unsigned int i;
+ i = mask & -mask;
+ mask ^= i;
+ i = ffs (i) - 1;
+
+ cfi_add_CFA_offset (i + 32, offset);
+ offset += 8;
+ }
+ }
+
+ cfi_end_fde (p->func_end_sym);
+ }
+}
+
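+/* Parse .usepv <name>,{no|std}: record in the symbol's st_other bits
+   whether the function uses the standard GP load sequence.  */
+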
+static void
+s_sw_64_usepv (int unused ATTRIBUTE_UNUSED)
+{
+ char *name, name_end;
+ char *which, which_end;
+ symbolS *sym;
+ int other;
+
+ name_end = get_symbol_name (&name);
+
+ if (!is_name_beginner (*name))
+ {
+ as_bad (_ (".usepv directive has no name"));
+ (void) restore_line_pointer (name_end);
+ ignore_rest_of_line ();
+ return;
+ }
+
+ sym = symbol_find_or_make (name);
+ name_end = restore_line_pointer (name_end);
+ if (!is_end_of_line[(unsigned char) name_end])
+ input_line_pointer++;
+
+ if (name_end != ',')
+ {
+ as_bad (_ (".usepv directive has no type"));
+ ignore_rest_of_line ();
+ return;
+ }
+
+ SKIP_WHITESPACE ();
+
+ which_end = get_symbol_name (&which);
+
+ if (strcmp (which, "no") == 0)
+ other = STO_SW_64_NOPV;
+ else if (strcmp (which, "std") == 0)
+ other = STO_SW_64_STD_GPLOAD;
+ else
+ {
+ as_bad (_ ("unknown argument for .usepv"));
+ other = 0;
+ }
+
+ (void) restore_line_pointer (which_end);
+ demand_empty_rest_of_line ();
+
+ S_SET_OTHER (sym, other | (S_GET_OTHER (sym) & ~STO_SW_64_STD_GPLOAD));
+}
+#endif /* OBJ_ELF */
+
+/* Standard calling conventions leaves the CFA at $30 on entry. */
+
+void
+sw_64_cfi_frame_initial_instructions (void)
+{
+ cfi_add_CFA_def_cfa_register (30);
+}
+
+#ifdef OBJ_EVAX
+
+/* Get name of section. */
+static const char *
+s_sw_64_section_name (void)
+{
+ char *name;
+
+ SKIP_WHITESPACE ();
+ if (*input_line_pointer == '"')
+ {
+ int dummy;
+
+ name = demand_copy_C_string (&dummy);
+ if (name == NULL)
+ {
+ ignore_rest_of_line ();
+ return NULL;
+ }
+ }
+ else
+ {
+ char *end = input_line_pointer;
+
+ while (0 == strchr ("\n\t,; ", *end))
+ end++;
+ if (end == input_line_pointer)
+ {
+ as_warn (_ ("missing name"));
+ ignore_rest_of_line ();
+ return NULL;
+ }
+
+ name = xmemdup0 (input_line_pointer, end - input_line_pointer);
+ input_line_pointer = end;
+ }
+ SKIP_WHITESPACE ();
+ return name;
+}
+
+/* Put clear/set flags in one flagword. The LSBs are flags to be set,
+ the MSBs are the flags to be cleared. */
+
+#define EGPS__V_NO_SHIFT 16
+#define EGPS__V_MASK 0xffff
+
+/* Parse one VMS section flag. */
+
+static flagword
+s_sw_64_section_word (char *str, size_t len)
+{
+ int no = 0;
+ flagword flag = 0;
+
+ if (len == 5 && strncmp (str, "NO", 2) == 0)
+ {
+ no = 1;
+ str += 2;
+ len -= 2;
+ }
+
+ if (len == 3)
+ {
+ if (strncmp (str, "PIC", 3) == 0)
+ flag = EGPS__V_PIC;
+ else if (strncmp (str, "LIB", 3) == 0)
+ flag = EGPS__V_LIB;
+ else if (strncmp (str, "OVR", 3) == 0)
+ flag = EGPS__V_OVR;
+ else if (strncmp (str, "REL", 3) == 0)
+ flag = EGPS__V_REL;
+ else if (strncmp (str, "GBL", 3) == 0)
+ flag = EGPS__V_GBL;
+ else if (strncmp (str, "SHR", 3) == 0)
+ flag = EGPS__V_SHR;
+ else if (strncmp (str, "EXE", 3) == 0)
+ flag = EGPS__V_EXE;
+ else if (strncmp (str, "WRT", 3) == 0)
+ flag = EGPS__V_WRT;
+ else if (strncmp (str, "VEC", 3) == 0)
+ flag = EGPS__V_VEC;
+ else if (strncmp (str, "MOD", 3) == 0)
+ {
+ flag = no ? EGPS__V_NOMOD : EGPS__V_NOMOD << EGPS__V_NO_SHIFT;
+ no = 0;
+ }
+ else if (strncmp (str, "COM", 3) == 0)
+ flag = EGPS__V_COM;
+ }
+
+ if (flag == 0)
+ {
+ char c = str[len];
+ str[len] = 0;
+ as_warn (_ ("unknown section attribute %s"), str);
+ str[len] = c;
+ return 0;
+ }
+
+ if (no)
+ return flag << EGPS__V_NO_SHIFT;
+ else
+ return flag;
+}
+
+/* Handle the section specific pseudo-op. */
+
+#define EVAX_SECTION_COUNT 5
+
+static const char *section_name[EVAX_SECTION_COUNT + 1]
+ = {"NULL", ".rdata", ".comm", ".link", ".ctors", ".dtors"};
+
+static void
+s_sw_64_section (int secid)
+{
+ const char *name;
+ char *beg;
+ segT sec;
+ flagword vms_flags = 0;
+ symbolS *symbol;
+
+ if (secid == 0)
+ {
+ name = s_sw_64_section_name ();
+ if (name == NULL)
+ return;
+ sec = subseg_new (name, 0);
+ if (*input_line_pointer == ',')
+ {
+ /* Skip the comma. */
+ ++input_line_pointer;
+ SKIP_WHITESPACE ();
+
+ do
+ {
+ char c;
+
+ SKIP_WHITESPACE ();
+ c = get_symbol_name (&beg);
+ *input_line_pointer = c;
+
+ vms_flags |= s_sw_64_section_word (beg, input_line_pointer - beg);
+
+ SKIP_WHITESPACE_AFTER_NAME ();
+ } while (*input_line_pointer++ == ',');
+
+ --input_line_pointer;
+ }
+
+ symbol = symbol_find_or_make (name);
+ S_SET_SEGMENT (symbol, sec);
+ symbol_get_bfdsym (symbol)->flags |= BSF_SECTION_SYM;
+ bfd_vms_set_section_flags (stdoutput, sec,
+ (vms_flags >> EGPS__V_NO_SHIFT) & EGPS__V_MASK,
+ vms_flags & EGPS__V_MASK);
+ }
+ else
+ {
+ get_absolute_expression ();
+ subseg_new (section_name[secid], 0);
+ }
+
+ demand_empty_rest_of_line ();
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+
+static void
+s_sw_64_literals (int ignore ATTRIBUTE_UNUSED)
+{
+ subseg_new (".literals", 0);
+ demand_empty_rest_of_line ();
+ sw_64_insn_label = NULL;
+ sw_64_auto_align_on = 1;
+ sw_64_current_align = 0;
+}
+
+/* Parse .ent directives. */
+
+static void
+s_sw_64_ent (int ignore ATTRIBUTE_UNUSED)
+{
+ symbolS *symbol;
+ expressionS symexpr;
+
+ if (sw_64_evax_proc != NULL)
+ as_bad (_ ("previous .ent not closed by a .end"));
+
+ sw_64_evax_proc = &sw_64_evax_proc_data;
+
+ sw_64_evax_proc->pdsckind = 0;
+ sw_64_evax_proc->framereg = -1;
+ sw_64_evax_proc->framesize = 0;
+ sw_64_evax_proc->rsa_offset = 0;
+ sw_64_evax_proc->ra_save = AXP_REG_RA;
+ sw_64_evax_proc->fp_save = -1;
+ sw_64_evax_proc->imask = 0;
+ sw_64_evax_proc->fmask = 0;
+ sw_64_evax_proc->prologue = 0;
+ sw_64_evax_proc->type = 0;
+ sw_64_evax_proc->handler = 0;
+ sw_64_evax_proc->handler_data = 0;
+
+ expression (&symexpr);
+
+ if (symexpr.X_op != O_symbol)
+ {
+ as_fatal (_ (".ent directive has no symbol"));
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ symbol = make_expr_symbol (&symexpr);
+ symbol_get_bfdsym (symbol)->flags |= BSF_FUNCTION;
+ sw_64_evax_proc->symbol = symbol;
+
+ demand_empty_rest_of_line ();
+}
+
+static void
+s_sw_64_handler (int is_data)
+{
+ if (is_data)
+ sw_64_evax_proc->handler_data = get_absolute_expression ();
+ else
+ {
+ char *name, name_end;
+
+ name_end = get_symbol_name (&name);
+
+ if (!is_name_beginner (*name))
+ {
+ as_warn (_ (".handler directive has no name"));
+ }
+ else
+ {
+ symbolS *sym;
+
+ sym = symbol_find_or_make (name);
+ symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
+ sw_64_evax_proc->handler = sym;
+ }
+
+ (void) restore_line_pointer (name_end);
+ }
+
+ demand_empty_rest_of_line ();
+}
+
+/* Parse .frame <framereg>,<framesize>,RA,<rsa_offset> directives. */
+
+static void
+s_sw_64_frame (int ignore ATTRIBUTE_UNUSED)
+{
+ long val;
+ int ra;
+
+ sw_64_evax_proc->framereg = tc_get_register (1);
+
+ SKIP_WHITESPACE ();
+ if (*input_line_pointer++ != ','
+ || get_absolute_expression_and_terminator (&val) != ',')
+ {
+ as_warn (_ ("Bad .frame directive 1./2. param"));
+ --input_line_pointer;
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ sw_64_evax_proc->framesize = val;
+
+ ra = tc_get_register (1);
+ if (ra != AXP_REG_RA)
+ as_warn (_ ("Bad RA (%d) register for .frame"), ra);
+
+ SKIP_WHITESPACE ();
+ if (*input_line_pointer++ != ',')
+ {
+ as_warn (_ ("Bad .frame directive 3./4. param"));
+ --input_line_pointer;
+ demand_empty_rest_of_line ();
+ return;
+ }
+ sw_64_evax_proc->rsa_offset = get_absolute_expression ();
+}
+
+/* Parse .prologue. */
+
+static void
+s_sw_64_prologue (int ignore ATTRIBUTE_UNUSED)
+{
+ demand_empty_rest_of_line ();
+ sw_64_prologue_label
+ = symbol_new (FAKE_LABEL_NAME, now_seg, frag_now, frag_now_fix ());
+}
+
+/* Parse .pdesc <entry_name>,{null|stack|reg}
+ Insert a procedure descriptor. */
+
+static void
+s_sw_64_pdesc (int ignore ATTRIBUTE_UNUSED)
+{
+ char *name;
+ char name_end;
+ char *p;
+ expressionS exp;
+ symbolS *entry_sym;
+ const char *entry_sym_name;
+ const char *pdesc_sym_name;
+ fixS *fixp;
+ size_t len;
+
+ if (now_seg != sw_64_link_section)
+ {
+ as_bad (_ (".pdesc directive not in link (.link) section"));
+ return;
+ }
+
+ expression (&exp);
+ if (exp.X_op != O_symbol)
+ {
+ as_bad (_ (".pdesc directive has no entry symbol"));
+ return;
+ }
+
+ entry_sym = make_expr_symbol (&exp);
+ entry_sym_name = S_GET_NAME (entry_sym);
+
+ /* Strip "..en". */
+ len = strlen (entry_sym_name);
+ if (len < 4 || strcmp (entry_sym_name + len - 4, "..en") != 0)
+ {
+ as_bad (_ (".pdesc has a bad entry symbol"));
+ return;
+ }
+  len -= 4;
+
+  /* Make sure a matching .ent is open before using its symbol name.  */
+  if (!sw_64_evax_proc || !S_IS_DEFINED (sw_64_evax_proc->symbol))
+    {
+      as_fatal (_ (".pdesc doesn't match with last .ent"));
+      return;
+    }
+
+  pdesc_sym_name = S_GET_NAME (sw_64_evax_proc->symbol);
+  if (strlen (pdesc_sym_name) != len
+      || memcmp (entry_sym_name, pdesc_sym_name, len) != 0)
+    {
+      as_fatal (_ (".pdesc doesn't match with last .ent"));
+      return;
+    }
+
+ /* Define pdesc symbol. */
+ symbol_set_value_now (sw_64_evax_proc->symbol);
+
+ /* Save bfd symbol of proc entry in function symbol. */
+ ((struct evax_private_udata_struct *) symbol_get_bfdsym (
+ sw_64_evax_proc->symbol)
+ ->udata.p)
+ ->enbsym
+ = symbol_get_bfdsym (entry_sym);
+
+ SKIP_WHITESPACE ();
+ if (*input_line_pointer++ != ',')
+ {
+ as_warn (_ ("No comma after .pdesc <entryname>"));
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ SKIP_WHITESPACE ();
+ name_end = get_symbol_name (&name);
+
+ if (strncmp (name, "stack", 5) == 0)
+ sw_64_evax_proc->pdsckind = PDSC_S_K_KIND_FP_STACK;
+
+ else if (strncmp (name, "reg", 3) == 0)
+ sw_64_evax_proc->pdsckind = PDSC_S_K_KIND_FP_REGISTER;
+
+ else if (strncmp (name, "null", 4) == 0)
+ sw_64_evax_proc->pdsckind = PDSC_S_K_KIND_NULL;
+
+ else
+ {
+ (void) restore_line_pointer (name_end);
+ as_fatal (_ ("unknown procedure kind"));
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ (void) restore_line_pointer (name_end);
+ demand_empty_rest_of_line ();
+
+#ifdef md_flush_pending_output
+ md_flush_pending_output ();
+#endif
+
+ frag_align (3, 0, 0);
+ p = frag_more (16);
+ fixp = fix_new (frag_now, p - frag_now->fr_literal, 8, 0, 0, 0, 0);
+ fixp->fx_done = 1;
+
+ *p = sw_64_evax_proc->pdsckind
+ | ((sw_64_evax_proc->framereg == 29) ? PDSC_S_M_BASE_REG_IS_FP : 0)
+ | ((sw_64_evax_proc->handler) ? PDSC_S_M_HANDLER_VALID : 0)
+ | ((sw_64_evax_proc->handler_data) ? PDSC_S_M_HANDLER_DATA_VALID : 0);
+ *(p + 1) = PDSC_S_M_NATIVE | PDSC_S_M_NO_JACKET;
+
+ switch (sw_64_evax_proc->pdsckind)
+ {
+ case PDSC_S_K_KIND_NULL:
+ *(p + 2) = 0;
+ *(p + 3) = 0;
+ break;
+ case PDSC_S_K_KIND_FP_REGISTER:
+ *(p + 2) = sw_64_evax_proc->fp_save;
+ *(p + 3) = sw_64_evax_proc->ra_save;
+ break;
+ case PDSC_S_K_KIND_FP_STACK:
+ md_number_to_chars (p + 2, (valueT) sw_64_evax_proc->rsa_offset, 2);
+ break;
+ default: /* impossible */
+ break;
+ }
+
+ *(p + 4) = 0;
+ *(p + 5) = sw_64_evax_proc->type & 0x0f;
+
+ /* Signature offset. */
+ md_number_to_chars (p + 6, (valueT) 0, 2);
+
+ fix_new_exp (frag_now, p - frag_now->fr_literal + 8, 8, &exp, 0,
+ BFD_RELOC_64);
+
+ if (sw_64_evax_proc->pdsckind == PDSC_S_K_KIND_NULL)
+ return;
+
+ /* pdesc+16: Size. */
+ p = frag_more (6);
+ md_number_to_chars (p, (valueT) sw_64_evax_proc->framesize, 4);
+ md_number_to_chars (p + 4, (valueT) 0, 2);
+
+ /* Entry length. */
+ exp.X_op = O_subtract;
+ exp.X_add_symbol = sw_64_prologue_label;
+ exp.X_op_symbol = entry_sym;
+ emit_expr (&exp, 2);
+
+ if (sw_64_evax_proc->pdsckind == PDSC_S_K_KIND_FP_REGISTER)
+ return;
+
+ /* pdesc+24: register masks. */
+ p = frag_more (8);
+ md_number_to_chars (p, sw_64_evax_proc->imask, 4);
+ md_number_to_chars (p + 4, sw_64_evax_proc->fmask, 4);
+
+ if (sw_64_evax_proc->handler)
+ {
+ p = frag_more (8);
+ fixp = fix_new (frag_now, p - frag_now->fr_literal, 8,
+ sw_64_evax_proc->handler, 0, 0, BFD_RELOC_64);
+ }
+
+ if (sw_64_evax_proc->handler_data)
+ {
+ p = frag_more (8);
+ md_number_to_chars (p, sw_64_evax_proc->handler_data, 8);
+ }
+}
+
+/* Support for crash debug on vms. */
+
+static void
+s_sw_64_name (int ignore ATTRIBUTE_UNUSED)
+{
+ char *p;
+ expressionS exp;
+
+ if (now_seg != sw_64_link_section)
+ {
+ as_bad (_ (".name directive not in link (.link) section"));
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ expression (&exp);
+ if (exp.X_op != O_symbol)
+ {
+ as_warn (_ (".name directive has no symbol"));
+ demand_empty_rest_of_line ();
+ return;
+ }
+
+ demand_empty_rest_of_line ();
+
+#ifdef md_flush_pending_output
+ md_flush_pending_output ();
+#endif
+
+ frag_align (3, 0, 0);
+ p = frag_more (8);
+
+ fix_new_exp (frag_now, p - frag_now->fr_literal, 8, &exp, 0, BFD_RELOC_64);
+}
+
+/* Parse .linkage <symbol>.
+ Create a linkage pair relocation. */
+
+static void
+s_sw_64_linkage (int ignore ATTRIBUTE_UNUSED)
+{
+ expressionS exp;
+ char *p;
+ fixS *fixp;
+
+#ifdef md_flush_pending_output
+ md_flush_pending_output ();
+#endif
+
+ expression (&exp);
+ if (exp.X_op != O_symbol)
+ {
+ as_fatal (_ ("No symbol after .linkage"));
+ }
+ else
+ {
+ struct sw_64_linkage_fixups *linkage_fixup;
+
+ p = frag_more (LKP_S_K_SIZE);
+ memset (p, 0, LKP_S_K_SIZE);
+ fixp = fix_new_exp (frag_now, p - frag_now->fr_literal, LKP_S_K_SIZE,
+ &exp, 0, BFD_RELOC_SW_64_LINKAGE);
+
+ if (sw_64_insn_label == NULL)
+ sw_64_insn_label
+ = symbol_new (FAKE_LABEL_NAME, now_seg, frag_now, frag_now_fix ());
+
+ /* Create a linkage element. */
+ linkage_fixup = XNEW (struct sw_64_linkage_fixups);
+ linkage_fixup->fixp = fixp;
+ linkage_fixup->next = NULL;
+ linkage_fixup->label = sw_64_insn_label;
+
+ /* Append it to the list. */
+ if (sw_64_linkage_fixup_root == NULL)
+ sw_64_linkage_fixup_root = linkage_fixup;
+ else
+ sw_64_linkage_fixup_tail->next = linkage_fixup;
+ sw_64_linkage_fixup_tail = linkage_fixup;
+ }
+ demand_empty_rest_of_line ();
+}
+
+/* Parse .code_address <symbol>.
+ Create a code address relocation. */
+
+static void
+s_sw_64_code_address (int ignore ATTRIBUTE_UNUSED)
+{
+ expressionS exp;
+ char *p;
+
+#ifdef md_flush_pending_output
+ md_flush_pending_output ();
+#endif
+
+ expression (&exp);
+ if (exp.X_op != O_symbol)
+ as_fatal (_ ("No symbol after .code_address"));
+ else
+ {
+ p = frag_more (8);
+ memset (p, 0, 8);
+ fix_new_exp (frag_now, p - frag_now->fr_literal, 8, &exp, 0,
+ BFD_RELOC_SW_64_CODEADDR);
+ }
+ demand_empty_rest_of_line ();
+}
+
+static void
+s_sw_64_fp_save (int ignore ATTRIBUTE_UNUSED)
+{
+ sw_64_evax_proc->fp_save = tc_get_register (1);
+
+ demand_empty_rest_of_line ();
+}
+
+static void
+s_sw_64_mask (int ignore ATTRIBUTE_UNUSED)
+{
+ long val;
+
+ if (get_absolute_expression_and_terminator (&val) != ',')
+ {
+ as_warn (_ ("Bad .mask directive"));
+ --input_line_pointer;
+ }
+ else
+ {
+ sw_64_evax_proc->imask = val;
+ (void) get_absolute_expression ();
+ }
+ demand_empty_rest_of_line ();
+}
+
+static void
+s_sw_64_fmask (int ignore ATTRIBUTE_UNUSED)
+{
+ long val;
+
+ if (get_absolute_expression_and_terminator (&val) != ',')
+ {
+ as_warn (_ ("Bad .fmask directive"));
+ --input_line_pointer;
+ }
+ else
+ {
+ sw_64_evax_proc->fmask = val;
+ (void) get_absolute_expression ();
+ }
+ demand_empty_rest_of_line ();
+}
+
+static void
+s_sw_64_end (int ignore ATTRIBUTE_UNUSED)
+{
+ char *name;
+ char c;
+
+ c = get_symbol_name (&name);
+ (void) restore_line_pointer (c);
+ demand_empty_rest_of_line ();
+ sw_64_evax_proc = NULL;
+}
+
+static void
+s_sw_64_file (int ignore ATTRIBUTE_UNUSED)
+{
+ symbolS *s;
+ int length;
+ static char case_hack[32];
+
+ sprintf (case_hack, "<CASE:%01d%01d>", sw_64_flag_hash_long_names,
+ sw_64_flag_show_after_trunc);
+
+ s = symbol_find_or_make (case_hack);
+ symbol_get_bfdsym (s)->flags |= BSF_FILE;
+
+ get_absolute_expression ();
+ s = symbol_find_or_make (demand_copy_string (&length));
+ symbol_get_bfdsym (s)->flags |= BSF_FILE;
+ demand_empty_rest_of_line ();
+}
+#endif /* OBJ_EVAX */
+
+/* Handle the .gprel32 pseudo op. */
+
+static void
+s_sw_64_gprel32 (int ignore ATTRIBUTE_UNUSED)
+{
+ expressionS e;
+ char *p;
+
+ SKIP_WHITESPACE ();
+ expression (&e);
+
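+  /* Under ELF the value is emitted with a GPREL32 reloc against the
+     symbol; under ECOFF it becomes a difference against the GP symbol.  */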
+#ifdef OBJ_ELF
+ switch (e.X_op)
+ {
+ case O_constant:
+ e.X_add_symbol = section_symbol (absolute_section);
+ e.X_op = O_symbol;
+ /* FALLTHRU */
+ case O_symbol:
+ break;
+ default:
+ abort ();
+ }
+#else
+#ifdef OBJ_ECOFF
+ switch (e.X_op)
+ {
+ case O_constant:
+ e.X_add_symbol = section_symbol (absolute_section);
+ /* fall through */
+ case O_symbol:
+ e.X_op = O_subtract;
+ e.X_op_symbol = sw_64_gp_symbol;
+ break;
+ default:
+ abort ();
+ }
+#endif
+#endif
+
+ if (sw_64_auto_align_on && sw_64_current_align < 2)
+ sw_64_align (2, (char *) NULL, sw_64_insn_label, 0);
+ if (sw_64_current_align > 2)
+ sw_64_current_align = 2;
+ sw_64_insn_label = NULL;
+
+ p = frag_more (4);
+ memset (p, 0, 4);
+ fix_new_exp (frag_now, p - frag_now->fr_literal, 4, &e, 0, BFD_RELOC_GPREL32);
+}
+
+/* Handle floating point allocation pseudo-ops. This is like the
+ generic version, but it makes sure the current label, if any, is
+ correctly aligned. */
+
+static void
+s_sw_64_float_cons (int type)
+{
+ int log_size;
+
+ switch (type)
+ {
+ default:
+ case 'f':
+ case 'F':
+ log_size = 2;
+ break;
+
+ case 'd':
+ case 'D':
+ case 'G':
+ log_size = 3;
+ break;
+
+ case 'x':
+ case 'X':
+ case 'p':
+ case 'P':
+ log_size = 4;
+ break;
+ }
+
+ if (sw_64_auto_align_on && sw_64_current_align < log_size)
+ sw_64_align (log_size, (char *) NULL, sw_64_insn_label, 0);
+ if (sw_64_current_align > log_size)
+ sw_64_current_align = log_size;
+ sw_64_insn_label = NULL;
+
+ float_cons (type);
+}
+
+/* Handle the .proc pseudo op. We don't really do much with it except
+ parse it. */
+
+static void
+s_sw_64_proc (int is_static ATTRIBUTE_UNUSED)
+{
+ char *name;
+ char c;
+ char *p;
+ symbolS *symbolP;
+ int temp;
+
+ /* Takes ".proc name,nargs". */
+ SKIP_WHITESPACE ();
+ c = get_symbol_name (&name);
+ p = input_line_pointer;
+ symbolP = symbol_find_or_make (name);
+ *p = c;
+ SKIP_WHITESPACE_AFTER_NAME ();
+ if (*input_line_pointer != ',')
+ {
+ *p = 0;
+ as_warn (_ ("Expected comma after name \"%s\""), name);
+ *p = c;
+ temp = 0;
+ ignore_rest_of_line ();
+ }
+ else
+ {
+ input_line_pointer++;
+ temp = get_absolute_expression ();
+ }
+ /* *symbol_get_obj (symbolP) = (signed char) temp; */
+ (void) symbolP;
+ as_warn (_ ("unhandled: .proc %s,%d"), name, temp);
+ demand_empty_rest_of_line ();
+}
+
+/* Handle the .set pseudo op. This is used to turn on and off most of
+ the assembler features. */
+
+static void
+s_sw_64_set (int x ATTRIBUTE_UNUSED)
+{
+ char *name, ch, *s;
+ int yesno = 1;
+
+ SKIP_WHITESPACE ();
+
+ ch = get_symbol_name (&name);
+ s = name;
+ if (s[0] == 'n' && s[1] == 'o')
+ {
+ yesno = 0;
+ s += 2;
+ }
+ if (!strcmp ("reorder", s))
+ /* ignore */;
+ else if (!strcmp ("at", s))
+ sw_64_noat_on = !yesno;
+ else if (!strcmp ("macro", s))
+ sw_64_macros_on = yesno;
+ else if (!strcmp ("move", s))
+ /* ignore */;
+ else if (!strcmp ("volatile", s))
+ /* ignore */;
+ else
+ as_warn (_ ("Tried to .set unrecognized mode `%s'"), name);
+
+ (void) restore_line_pointer (ch);
+ demand_empty_rest_of_line ();
+}
+
+/* Handle the .base pseudo op. This changes the assembler's notion of
+ the $gp register. */
+
+static void
+s_sw_64_base (int ignore ATTRIBUTE_UNUSED)
+{
+ SKIP_WHITESPACE ();
+
+ if (*input_line_pointer == '$')
+ {
+ /* $rNN form. */
+ input_line_pointer++;
+ if (*input_line_pointer == 'r')
+ input_line_pointer++;
+ }
+
+ sw_64_gp_register = get_absolute_expression ();
+ if (sw_64_gp_register < 0 || sw_64_gp_register > 31)
+ {
+ sw_64_gp_register = AXP_REG_GP;
+ as_warn (_ ("Bad base register, using $%d."), sw_64_gp_register);
+ }
+
+ demand_empty_rest_of_line ();
+}
+
+/* Handle the .align pseudo-op. This aligns to a power of two. It
+ also adjusts any current instruction label. We treat this the same
+ way the MIPS port does: .align 0 turns off auto alignment. */
+
+static void
+s_sw_64_align (int ignore ATTRIBUTE_UNUSED)
+{
+ int align;
+ char fill, *pfill;
+ long max_alignment = 16;
+
+ align = get_absolute_expression ();
+ if (align > max_alignment)
+ {
+ align = max_alignment;
+ as_bad (_ ("Alignment too large: %d. assumed"), align);
+ }
+ else if (align < 0)
+ {
+ as_warn (_ ("Alignment negative: 0 assumed"));
+ align = 0;
+ }
+
+ if (*input_line_pointer == ',')
+ {
+ input_line_pointer++;
+ fill = get_absolute_expression ();
+ pfill = &fill;
+ }
+ else
+ pfill = NULL;
+
+ if (align != 0)
+ {
+ sw_64_auto_align_on = 1;
+ sw_64_align (align, pfill, NULL, 1);
+ }
+ else
+ {
+ sw_64_auto_align_on = 0;
+ }
+ sw_64_insn_label = NULL;
+
+ demand_empty_rest_of_line ();
+}
+
+/* Hook the normal string processor to reset known alignment. */
+
+static void
+s_sw_64_stringer (int terminate)
+{
+ sw_64_current_align = 0;
+ sw_64_insn_label = NULL;
+ stringer (8 + terminate);
+}
+
+/* Hook the normal space processing to reset known alignment. */
+
+static void
+s_sw_64_space (int ignore)
+{
+ sw_64_current_align = 0;
+ sw_64_insn_label = NULL;
+ s_space (ignore);
+}
+
+/* Hook into cons for auto-alignment. */
+
+void
+sw_64_cons_align (int size)
+{
+ int log_size;
+
+ log_size = 0;
+ while ((size >>= 1) != 0)
+ ++log_size;
+
+ if (sw_64_auto_align_on && sw_64_current_align < log_size)
+ sw_64_align (log_size, (char *) NULL, sw_64_insn_label, 0);
+ if (sw_64_current_align > log_size)
+ sw_64_current_align = log_size;
+ sw_64_insn_label = NULL;
+}
+
+/* Here come the .uword, .ulong, and .uquad explicitly unaligned
+ pseudos. We just turn off auto-alignment and call down to cons. */
+
+static void
+s_sw_64_ucons (int bytes)
+{
+ int hold = sw_64_auto_align_on;
+ sw_64_auto_align_on = 0;
+ cons (bytes);
+ sw_64_auto_align_on = hold;
+}
+
+/* Switch the working cpu type. */
+
+static void
+s_sw_64_arch (int ignored ATTRIBUTE_UNUSED)
+{
+ char *name, ch;
+ const struct cpu_type *p;
+
+ SKIP_WHITESPACE ();
+
+ ch = get_symbol_name (&name);
+
+ for (p = cpu_types; p->name; ++p)
+ if (strcmp (name, p->name) == 0)
+ {
+ sw_64_target_name = p->name, sw_64_target = p->flags;
+ goto found;
+ }
+ as_warn (_ ("Unknown CPU identifier `%s'"), name);
+
+found:
+ (void) restore_line_pointer (ch);
+ demand_empty_rest_of_line ();
+}
+
+#ifdef DEBUG1
+/* print token expression with sw_64 specific extension. */
+
+static void
+sw_64_print_token (FILE *f, const expressionS *exp)
+{
+ switch (exp->X_op)
+ {
+ case O_cpregister:
+ putc (',', f);
+ /* FALLTHRU */
+ case O_pregister:
+ putc ('(', f);
+ {
+ expressionS nexp = *exp;
+ nexp.X_op = O_register;
+ print_expr_1 (f, &nexp);
+ }
+ putc (')', f);
+ break;
+ default:
+ print_expr_1 (f, exp);
+ break;
+ }
+}
+#endif
+
+/* The target specific pseudo-ops which we support. */
+
+const pseudo_typeS md_pseudo_table[] = {
+#ifdef OBJ_ECOFF
+ {"comm", s_sw_64_comm, 0}, /* OSF1 compiler does this. */
+ {"rdata", s_sw_64_rdata, 0},
+#endif
+ {"text", s_sw_64_text, 0},
+ {"data", s_sw_64_data, 0},
+#ifdef OBJ_ECOFF
+ {"sdata", s_sw_64_sdata, 0},
+#endif
+#ifdef OBJ_ELF
+ {"section", s_sw_64_section, 0},
+ {"section.s", s_sw_64_section, 0},
+ {"sect", s_sw_64_section, 0},
+ {"sect.s", s_sw_64_section, 0},
+#endif
+#ifdef OBJ_EVAX
+ {"section", s_sw_64_section, 0},
+ {"literals", s_sw_64_literals, 0},
+ {"pdesc", s_sw_64_pdesc, 0},
+ {"name", s_sw_64_name, 0},
+ {"linkage", s_sw_64_linkage, 0},
+ {"code_address", s_sw_64_code_address, 0},
+ {"ent", s_sw_64_ent, 0},
+ {"frame", s_sw_64_frame, 0},
+ {"fp_save", s_sw_64_fp_save, 0},
+ {"mask", s_sw_64_mask, 0},
+ {"fmask", s_sw_64_fmask, 0},
+ {"end", s_sw_64_end, 0},
+ {"file", s_sw_64_file, 0},
+ {"rdata", s_sw_64_section, 1},
+ {"comm", s_sw_64_comm, 0},
+ {"link", s_sw_64_section, 3},
+ {"ctors", s_sw_64_section, 4},
+ {"dtors", s_sw_64_section, 5},
+ {"handler", s_sw_64_handler, 0},
+ {"handler_data", s_sw_64_handler, 1},
+#endif
+#ifdef OBJ_ELF
+ /* Frame related pseudos. */
+ {"ent", s_sw_64_ent, 0},
+ {"end", s_sw_64_end, 0},
+ {"mask", s_sw_64_mask, 0},
+ {"fmask", s_sw_64_mask, 1},
+ {"frame", s_sw_64_frame, 0},
+ {"prologue", s_sw_64_prologue, 0},
+ {"file", s_sw_64_file, 5},
+ {"loc", s_sw_64_loc, 9},
+ {"stabs", s_sw_64_stab, 's'},
+ {"stabn", s_sw_64_stab, 'n'},
+ {"usepv", s_sw_64_usepv, 0},
+ /* COFF debugging related pseudos. */
+ {"begin", s_sw_64_coff_wrapper, 0},
+ {"bend", s_sw_64_coff_wrapper, 1},
+ {"def", s_sw_64_coff_wrapper, 2},
+ {"dim", s_sw_64_coff_wrapper, 3},
+ {"endef", s_sw_64_coff_wrapper, 4},
+ {"scl", s_sw_64_coff_wrapper, 5},
+ {"tag", s_sw_64_coff_wrapper, 6},
+ {"val", s_sw_64_coff_wrapper, 7},
+#else
+#ifdef OBJ_EVAX
+ {"prologue", s_sw_64_prologue, 0},
+#else
+ {"prologue", s_ignore, 0},
+#endif
+#endif
+ {"gprel32", s_sw_64_gprel32, 0},
+ {"t_floating", s_sw_64_float_cons, 'd'},
+ {"s_floating", s_sw_64_float_cons, 'f'},
+ {"f_floating", s_sw_64_float_cons, 'F'},
+ {"g_floating", s_sw_64_float_cons, 'G'},
+ {"d_floating", s_sw_64_float_cons, 'D'},
+
+ {"proc", s_sw_64_proc, 0},
+ {"aproc", s_sw_64_proc, 1},
+ {"set", s_sw_64_set, 0},
+ {"reguse", s_ignore, 0},
+ {"livereg", s_ignore, 0},
+ {"base", s_sw_64_base, 0}, /*??*/
+ {"option", s_ignore, 0},
+ {"aent", s_ignore, 0},
+ {"ugen", s_ignore, 0},
+ {"eflag", s_ignore, 0},
+
+ {"align", s_sw_64_align, 0},
+ {"double", s_sw_64_float_cons, 'd'},
+ {"float", s_sw_64_float_cons, 'f'},
+ {"single", s_sw_64_float_cons, 'f'},
+ {"ascii", s_sw_64_stringer, 0},
+ {"asciz", s_sw_64_stringer, 1},
+ {"string", s_sw_64_stringer, 1},
+ {"space", s_sw_64_space, 0},
+ {"skip", s_sw_64_space, 0},
+ {"zero", s_sw_64_space, 0},
+
+ /* Unaligned data pseudos. */
+ {"uword", s_sw_64_ucons, 2},
+ {"ulong", s_sw_64_ucons, 4},
+ {"uquad", s_sw_64_ucons, 8},
+
+#ifdef OBJ_ELF
+ /* Dwarf wants these versions of unaligned. */
+ {"2byte", s_sw_64_ucons, 2},
+ {"4byte", s_sw_64_ucons, 4},
+ {"8byte", s_sw_64_ucons, 8},
+#endif
+
+ /* We don't do any optimizing, so we can safely ignore these. */
+ {"noalias", s_ignore, 0},
+ {"alias", s_ignore, 0},
+
+ {"arch", s_sw_64_arch, 0},
+
+ {NULL, 0, 0},
+};
+
+#ifdef OBJ_ECOFF
+
+/* @@@ GP selection voodoo. All of this seems overly complicated and
+ unnecessary; which is the primary reason it's for ECOFF only. */
+
+static inline void
+maybe_set_gp (asection *sec)
+{
+ bfd_vma vma;
+
+ if (!sec)
+ return;
+ vma = bfd_section_vma (sec);
+ if (vma && vma < sw_64_gp_value)
+ sw_64_gp_value = vma;
+}
+
+static void
+select_gp_value (void)
+{
+ gas_assert (sw_64_gp_value == 0);
+
+ /* Get minus-one in whatever width... */
+ sw_64_gp_value = 0;
+ sw_64_gp_value--;
+
+ /* Select the smallest VMA of these existing sections. */
+ maybe_set_gp (sw_64_lita_section);
+
+/* @@ Will a simple 0x8000 work here? If not, why not? */
+#define GP_ADJUSTMENT (0x8000 - 0x10)
+
+ sw_64_gp_value += GP_ADJUSTMENT;
+
+ S_SET_VALUE (sw_64_gp_symbol, sw_64_gp_value);
+
+#ifdef DEBUG1
+ printf (_ ("Chose GP value of %lx\n"), sw_64_gp_value);
+#endif
+}
+#endif /* OBJ_ECOFF */
+
+#ifdef OBJ_ELF
+/* Map 's' to SHF_SW_64_GPREL. */
+
+bfd_vma
+sw_64_elf_section_letter (int letter, const char **ptr_msg)
+{
+ if (letter == 's')
+ return SHF_SW_64_GPREL;
+
+ *ptr_msg = _ ("bad .section directive: want a,s,w,x,M,S,G,T in string");
+ return -1;
+}
+
+/* Map SHF_SW_64_GPREL to SEC_SMALL_DATA. */
+
+flagword
+sw_64_elf_section_flags (flagword flags, bfd_vma attr,
+ int type ATTRIBUTE_UNUSED)
+{
+ if (attr & SHF_SW_64_GPREL)
+ flags |= SEC_SMALL_DATA;
+ return flags;
+}
+#endif /* OBJ_ELF */
+
+/* This is called from HANDLE_ALIGN in write.c. Fill in the contents
+ of an rs_align_code fragment. */
+
+void
+sw_64_handle_align (fragS *fragp)
+{
+ static unsigned char const unop[4] = {0x5f, 0x07, 0xff, 0x43};
+ static unsigned char const nopunop[8]
+ = {0x5f, 0x07, 0xff, 0x43, 0x5f, 0x07, 0xff, 0x43};
+
+ int bytes, fix;
+ char *p;
+
+ if (fragp->fr_type != rs_align_code)
+ return;
+
+ bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
+ p = fragp->fr_literal + fragp->fr_fix;
+ fix = 0;
+
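+  /* Pad with zero bytes up to a 4-byte boundary, add one 4-byte no-op
+     pattern if needed to reach an 8-byte boundary, and let the variable
+     part of the frag repeat the 8-byte pattern.  */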
+ if (bytes & 3)
+ {
+ fix = bytes & 3;
+ memset (p, 0, fix);
+ p += fix;
+ bytes -= fix;
+ }
+
+ if (bytes & 4)
+ {
+ memcpy (p, unop, 4);
+ p += 4;
+ bytes -= 4;
+ fix += 4;
+ }
+
+ memcpy (p, nopunop, 8);
+
+ fragp->fr_fix += fix;
+ fragp->fr_var = 8;
+}
+
+/* Public interface functions. */
+
+/* This function is called once, at assembler startup time. It sets
+ up all the tables, etc. that the MD part of the assembler will
+ need, that can be determined before arguments are parsed. */
+
+void
+md_begin (void)
+{
+ unsigned int i;
+
+ /* Verify that X_op field is wide enough. */
+ {
+ expressionS e;
+
+ e.X_op = O_max;
+ gas_assert (e.X_op == O_max);
+ }
+
+ if (!bfd_set_arch_mach (stdoutput, bfd_arch_sw_64, file_sw_64_arch))
+ as_warn (_ ("could not set architecture and machine"));
+
+ /* Create the opcode hash table. */
+ sw_64_opcode_hash = str_htab_create ();
+
+ for (i = 0; i < sw_64_num_opcodes;)
+ {
+ const char *name, *slash;
+
+ name = sw_64_opcodes[i].name;
+ if (str_hash_insert (sw_64_opcode_hash, name, &sw_64_opcodes[i], 0))
+      as_fatal (_ ("internal error: can't hash opcode `%s'"), name);
+
+ /* Some opcodes include modifiers of various sorts with a "/mod"
+ syntax, like the architecture manual suggests. However, for
+ use with gcc at least, we also need access to those same opcodes
+ without the "/". */
+
+ if ((slash = strchr (name, '/')) != NULL)
+ {
+ size_t len = strlen (name);
+ char *p = notes_alloc (len);
+ size_t len1 = slash - name;
+
+ memcpy (p, name, len1);
+ memcpy (p + len1, slash + 1, len - len1);
+
+ (void) str_hash_insert (sw_64_opcode_hash, p, &sw_64_opcodes[i], 0);
+ /* Ignore failures -- the opcode table does duplicate some
+ variants in different forms, like "hw_stq" and "hw_st/q". */
+ }
+
+ while (++i < sw_64_num_opcodes
+ && (sw_64_opcodes[i].name == name
+ || !strcmp (sw_64_opcodes[i].name, name)))
+ continue;
+ }
+
+ /* Create the macro hash table. */
+ sw_64_macro_hash = str_htab_create ();
+
+ for (i = 0; i < sw_64_num_macros;)
+ {
+ const char *name, *retval;
+
+ name = sw_64_macros[i].name;
+ if (str_hash_insert (sw_64_macro_hash, name, &sw_64_macros[i], 0))
+      as_fatal (_ ("internal error: can't hash macro `%s'"), name);
+
+ while (++i < sw_64_num_macros
+ && (sw_64_macros[i].name == name
+ || !strcmp (sw_64_macros[i].name, name)))
+ continue;
+ }
+
+ /* Construct symbols for each of the registers. */
+ for (i = 0; i < 32; ++i)
+ {
+ char name[4];
+
+ sprintf (name, "$%d", i);
+ sw_64_register_table[i]
+ = symbol_create (name, reg_section, &zero_address_frag, i);
+ }
+
+ for (; i < 64; ++i)
+ {
+ char name[5];
+
+ sprintf (name, "$f%d", i - 32);
+ sw_64_register_table[i]
+ = symbol_create (name, reg_section, &zero_address_frag, i);
+ }
+
+ /* Create the special symbols and sections we'll be using. */
+
+ /* So .sbss will get used for tiny objects. */
+ bfd_set_gp_size (stdoutput, g_switch_value);
+
+#ifdef OBJ_ECOFF
+ create_literal_section (".lita", &sw_64_lita_section, &sw_64_lita_symbol);
+
+ /* For handling the GP, create a symbol that won't be output in the
+ symbol table. We'll edit it out of relocs later. */
+ sw_64_gp_symbol = symbol_create ("<GP value>", sw_64_lita_section,
+ &zero_address_frag, 0x8000);
+#endif
+
+#ifdef OBJ_EVAX
+ create_literal_section (".link", &sw_64_link_section, &sw_64_link_symbol);
+#endif
+
+#ifdef OBJ_ELF
+ if (ECOFF_DEBUGGING)
+ {
+ segT sec = subseg_new (".mdebug", (subsegT) 0);
+ bfd_set_section_flags (sec, SEC_HAS_CONTENTS | SEC_READONLY);
+ bfd_set_section_alignment (sec, 3);
+ }
+#endif
+
+ /* Create literal lookup hash table. */
+ sw_64_literal_hash = str_htab_create ();
+
+ subseg_set (text_section, 0);
+}
+
+/* The public interface to the instruction assembler. */
+
+void
+md_assemble (char *str)
+{
+ /* Current maximum is 13. */
+ char opname[32];
+ expressionS tok[MAX_INSN_ARGS];
+ int ntok, trunclen;
+ size_t opnamelen;
+
+ /* Split off the opcode. */
+ opnamelen = strspn (str, "abcdefghijklmnopqrstuvwxyz_/0123456789");
+ trunclen
+ = (opnamelen < sizeof (opname) - 1 ? opnamelen : sizeof (opname) - 1);
+ memcpy (opname, str, trunclen);
+ opname[trunclen] = '\0';
+
+ if (!strcmp (opname, "ldw_inc") || !strcmp (opname, "ldl_inc")
+ || !strcmp (opname, "ldw_dec") || !strcmp (opname, "ldl_dec")
+ || !strcmp (opname, "ldw_set") || !strcmp (opname, "ldl_set"))
+ {
+      if (!strcmp (sw_64_target_name, "sw6a"))
+	as_warn (_ ("\"%s\" is an sw6a-only instruction; consider changing it"),
+		 opname);
+      if (!strcmp (sw_64_target_name, "sw6b"))
+	as_bad (_ ("sw6b does not have \"%s\""), (char *) opname);
+ }
+
+ /* Tokenize the rest of the line. */
+ if ((ntok = tokenize_arguments (str + opnamelen, tok, MAX_INSN_ARGS)) < 0)
+ {
+ if (ntok != TOKENIZE_ERROR_REPORT)
+ as_bad (_ ("syntax error"));
+
+ return;
+ }
+
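+  /* Warn about floating-point and vector instructions whose source and
+     destination registers are the same, unless the check is disabled or
+     the selected CPU allows it.  */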
+#define REG_NUM 64
+ int i, j;
+ char float_insn[REG_NUM][REG_NUM]
+ = {"fadds", "faddd", "fsubs", "fsubd", "fmuls", "fmuld",
+ "fdivs", "fdivd", "fsqrts", "fsqrtd", "fcmpeq", "fcmple",
+ "fcmplt", "fcmpun", "fcvtsd", "fcvtds", "fcvtdl_g", "fcvtdl_p",
+ "fcvtdl_z", "fcvtdl_n", "fcvtdl", "fcvtlw", "fcvtls", "fcvtld",
+ "fmas", "fmad", "fmss", "fmsd", "fnmas", "fnmad",
+ "fnmss", "fnmsd", "vadds", "vaddd", "vsubs", "vsubd",
+ "vmuls", "vmuld", "vdivs", "vdivd", "vsqrts", "vsqrtd",
+ "vmas", "vmad", "vmss", "vmsd", "vnmas", "vnmad",
+ "vnmss", "vnmsd", "vfcmpeq", "vfcmple", "vfcmplt", "vfcmpun"};
+  if (!sw_64_flag_nocheck_samereg
+      && strcmp (sw_64_target_name, "sw6b") != 0
+      && strcmp (sw_64_target_name, "sw8a") != 0)
+    {
+      for (i = 0; i < REG_NUM; i++)
+	{
+	  if (strcmp (opname, float_insn[i]) == 0)
+	    {
+	      for (j = 0; j < ntok - 1; j++)
+		{
+		  if (tok[j].X_add_number == tok[ntok - 1].X_add_number)
+		    as_warn (
+		      _ ("source and destination registers of '%s' must be different"),
+		      opname);
+		}
+	    }
+	}
+    }
+  /* sw8a does not have 'rd_f', 'wr_f' or the atomic_op instructions. */
+  if (!strcmp (sw_64_target_name, "sw8a")
+      && (!strcmp (opname, "rd_f") || !strcmp (opname, "wr_f")
+	  || !strcmp (opname, "ldw_inc") || !strcmp (opname, "ldl_inc")
+	  || !strcmp (opname, "ldw_dec") || !strcmp (opname, "ldl_dec")
+	  || !strcmp (opname, "ldw_set") || !strcmp (opname, "ldl_set")))
+    as_warn (_ ("sw8a does not have \"%s\""), (char *) opname);
+
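+  /* With -branch_separate, count the instructions assembled since the
+     last conditional branch (modulo 4) and insert nops so that at least
+     three other instructions separate consecutive conditional branches.  */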
+ if (sw_64_branch_separate == 1)
+ {
+ if ((strcmp (opname, "beq") == 0) || (strcmp (opname, "bne") == 0)
+ || (strcmp (opname, "bge") == 0) || (strcmp (opname, "bgt") == 0)
+ || (strcmp (opname, "ble") == 0) || (strcmp (opname, "blt") == 0)
+ || (strcmp (opname, "blbc") == 0) || (strcmp (opname, "blbs") == 0)
+ || (strcmp (opname, "fbeq") == 0) || (strcmp (opname, "fbge") == 0)
+ || (strcmp (opname, "fbgt") == 0) || (strcmp (opname, "fble") == 0)
+ || (strcmp (opname, "fblt") == 0) || (strcmp (opname, "fbne") == 0))
+ {
+ if (nop_quantity > 0)
+ {
+ tokenize_arguments ("", toksave, MAX_INSN_ARGS);
+ for (; nop_quantity < 4; nop_quantity++)
+ assemble_tokens ("nop", toksave, 0, 1);
+ }
+ nop_quantity = 1;
+ }
+ else
+ {
+ if (nop_quantity > 0)
+ nop_quantity = (nop_quantity + 1) % 4;
+ }
+ }
+
+ /* Finish it off. */
+ assemble_tokens (opname, tok, ntok, sw_64_macros_on);
+}
+
+/* Round up a section's size to the appropriate boundary. */
+
+valueT
+md_section_align (segT seg, valueT size)
+{
+ int align = bfd_section_alignment (seg);
+ valueT mask = ((valueT) 1 << align) - 1;
+
+ return (size + mask) & ~mask;
+}
+
+/* Turn a string in input_line_pointer into a floating point constant
+ of type TYPE, and store the appropriate bytes in *LITP. The number
+ of LITTLENUMS emitted is stored in *SIZEP. An error message is
+ returned, or NULL on OK. */
+
+const char *
+md_atof (int type, char *litP, int *sizeP)
+{
+ extern const char *vax_md_atof (int, char *, int *);
+
+ switch (type)
+ {
+ /* VAX floats. */
+ case 'G':
+ /* vax_md_atof () doesn't like "G" for some reason. */
+ type = 'g';
+ /* Fall through. */
+ case 'F':
+ case 'D':
+ return vax_md_atof (type, litP, sizeP);
+
+ default:
+      return ieee_md_atof (type, litP, sizeP, false);
+ }
+}
+
+/* Take care of the target-specific command-line options. */
+
+int
+md_parse_option (int c, const char *arg)
+{
+ switch (c)
+ {
+ case 'F':
+ sw_64_nofloats_on = 1;
+ break;
+
+ case OPTION_32ADDR:
+ sw_64_addr32_on = 1;
+ break;
+
+ case 'g':
+ sw_64_debug = 1;
+ break;
+
+ case 'G':
+ g_switch_value = atoi (arg);
+ break;
+
+ case 'm': {
+ const struct cpu_type *p;
+
+ for (p = cpu_types; p->name; ++p)
+ if (strcmp (arg, p->name) == 0)
+ {
+ sw_64_target_name = p->name, sw_64_target = p->flags;
+ goto found;
+ }
+ as_warn (_ ("Unknown CPU identifier `%s'"), arg);
+ found:;
+ }
+ break;
+
+#ifdef OBJ_EVAX
+ case '+': /* For g++. Hash any name > 63 chars long. */
+ sw_64_flag_hash_long_names = 1;
+ break;
+
+ case 'H': /* Show new symbol after hash truncation. */
+ sw_64_flag_show_after_trunc = 1;
+ break;
+
+ case 'h': /* For gnu-c/vax compatibility. */
+ break;
+
+ case OPTION_REPLACE:
+ sw_64_flag_replace = 1;
+ break;
+
+ case OPTION_NOREPLACE:
+ sw_64_flag_replace = 0;
+ break;
+#endif
+
+ case OPTION_RELAX:
+ sw_64_flag_relax = 1;
+ break;
+
+#ifdef OBJ_ELF
+ case OPTION_MDEBUG:
+ sw_64_flag_mdebug = 1;
+ break;
+ case OPTION_NO_MDEBUG:
+ sw_64_flag_mdebug = 0;
+ break;
+#endif
+
+ case OPTION_NOCHECK_SAMEREG:
+ sw_64_flag_nocheck_samereg = 1;
+ break;
+
+ case OPTION_LITERALGOT:
+ sw_64_literalgot_on = 1;
+ break;
+
+ case OPTION_TLSRELGOT_GOTTPREL:
+ sw_64_tlsrelgot_gottprel_on = 1;
+ break;
+
+ case OPTION_TLSRELGOT_GOTDTPREL:
+ sw_64_tlsrelgot_gotdtprel_on = 1;
+ break;
+
+ case OPTION_TLSRELGOT_TLSLDM:
+ sw_64_tlsrelgot_tlsldm_on = 1;
+ break;
+
+ case OPTION_TLSRELGOT_TLSGD:
+ sw_64_tlsrelgot_tlsgd_on = 1;
+ break;
+
+ case OPTION_GPREL16:
+ sw_64_gprel16_on = 1;
+ break;
+
+ case OPTION_BRANCH_SEPARATE:
+ sw_64_branch_separate = 1;
+ break;
+ case OPTION_NOBRANCH_SEPARATE:
+ sw_64_branch_separate = 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Print a description of the command-line options that we accept. */
+
+void
+md_show_usage (FILE *stream)
+{
+ fputs (_ ("\
+Sw_64 options:\n\
+-32addr treat addresses as 32-bit values\n\
+-F lack floating point instructions support\n\
+-msw6a | -msw6b\n\
+ these variants include PALcode opcodes\n\
+-tlsrelgot_tlsgd\n\
+ insert ldih instruction with tlsrel_got relocation before ldi instruction with tlsgd relocation\n\
+-tlsrelgot_tlsldm\n\
+ insert ldih instruction with tlsrel_got relocation before ldi instruction with tlsldm relocation\n\
+-literalgot\n\
+ insert ldih instruction with literal_got relocation before ldl instruction with literal relocation\n\
+-gprel16\n\
+ change gprel16 relocation to gprelhi+gprello relocation with ldih instruction and ldi/ldw/flds/fldd instruction\n"),
+ stream);
+
+#ifdef OBJ_EVAX
+ fputs (_ ("\
+VMS options:\n\
+-+ encode (don't truncate) names longer than 64 characters\n\
+-H show new symbol after hash truncation\n\
+-replace/-noreplace enable or disable the optimization of procedure calls\n"),
+ stream);
+#endif
+}
+
+/* Decide from what point a pc-relative relocation is relative to,
+ relative to the pc-relative fixup. Er, relatively speaking. */
+
+long
+md_pcrel_from (fixS *fixP)
+{
+ valueT addr = fixP->fx_where + fixP->fx_frag->fr_address;
+
+ switch (fixP->fx_r_type)
+ {
+ case BFD_RELOC_23_PCREL_S2:
+ case BFD_RELOC_SW_64_BR26:
+ case BFD_RELOC_SW_64_HINT:
+ case BFD_RELOC_SW_64_BRSGP:
+ return addr + 4;
+ default:
+ return addr;
+ }
+}
+
+/* Attempt to simplify or even eliminate a fixup. The return value is
+ ignored; perhaps it was once meaningful, but now it is historical.
+ To indicate that a fixup has been eliminated, set fixP->fx_done.
+
+ For ELF, here it is that we transform the GPDISP_HI16 reloc we used
+ internally into the GPDISP reloc used externally. We had to do
+ this so that we'd have the GPDISP_LO16 reloc as a tag to compute
+ the distance to the "ldi" instruction for setting the addend to
+ GPDISP. */
+
+void
+md_apply_fix (fixS *fixP, valueT *valP, segT seg)
+{
+ char *const fixpos = fixP->fx_frag->fr_literal + fixP->fx_where;
+ valueT value = *valP;
+ unsigned image, size;
+
+ switch (fixP->fx_r_type)
+ {
+ /* The GPDISP relocations are processed internally with a symbol
+ referring to the current function's section; we need to drop
+ in a value which, when added to the address of the start of
+ the function, gives the desired GP. */
+ case BFD_RELOC_SW_64_GPDISP_HI16: {
+ fixS *next = fixP->fx_next;
+
+ /* With user-specified !gpdisp relocations, we can be missing
+ the matching LO16 reloc. We will have already issued an
+ error message. */
+ if (next)
+ fixP->fx_offset = (next->fx_frag->fr_address + next->fx_where
+ - fixP->fx_frag->fr_address - fixP->fx_where);
+
+ value = (value - sign_extend_16 (value)) >> 16;
+ }
+#ifdef OBJ_ELF
+ fixP->fx_r_type = BFD_RELOC_SW_64_GPDISP;
+#endif
+ goto do_reloc_gp;
+
+ case BFD_RELOC_SW_64_GPDISP_LO16:
+ value = sign_extend_16 (value);
+ fixP->fx_offset = 0;
+#ifdef OBJ_ELF
+ fixP->fx_done = 1;
+#endif
+
+ do_reloc_gp:
+ fixP->fx_addsy = section_symbol (seg);
+ md_number_to_chars (fixpos, value, 2);
+ break;
+ case BFD_RELOC_8:
+ if (fixP->fx_pcrel)
+ fixP->fx_r_type = BFD_RELOC_8_PCREL;
+ size = 1;
+ goto do_reloc_xx;
+ case BFD_RELOC_16:
+ if (fixP->fx_pcrel)
+ fixP->fx_r_type = BFD_RELOC_16_PCREL;
+ size = 2;
+ goto do_reloc_xx;
+
+ case BFD_RELOC_32:
+ if (fixP->fx_pcrel)
+ fixP->fx_r_type = BFD_RELOC_32_PCREL;
+ size = 4;
+ goto do_reloc_xx;
+
+ case BFD_RELOC_64:
+ if (fixP->fx_pcrel)
+ fixP->fx_r_type = BFD_RELOC_64_PCREL;
+ size = 8;
+
+ do_reloc_xx:
+ if (fixP->fx_pcrel == 0 && fixP->fx_addsy == 0)
+ {
+ md_number_to_chars (fixpos, value, size);
+ goto done;
+ }
+ return;
+
+#ifdef OBJ_ECOFF
+ case BFD_RELOC_GPREL32:
+ gas_assert (fixP->fx_subsy == sw_64_gp_symbol);
+ fixP->fx_subsy = 0;
+ /* FIXME: inherited this obliviousness of `value' -- why? */
+ md_number_to_chars (fixpos, -sw_64_gp_value, 4);
+ break;
+#else
+ case BFD_RELOC_GPREL32:
+#endif
+ case BFD_RELOC_GPREL16:
+ case BFD_RELOC_SW_64_GPREL_HI16:
+ case BFD_RELOC_SW_64_GPREL_LO16:
+ return;
+
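+    /* Branch displacements are encoded in units of instruction words
+       (hence the value >> 2); the masks below select the 21-bit and
+       26-bit displacement fields.  */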
+ case BFD_RELOC_23_PCREL_S2:
+ if (fixP->fx_pcrel == 0 && fixP->fx_addsy == 0)
+ {
+ image = bfd_getl32 (fixpos);
+ image = (image & ~0x1FFFFF) | ((value >> 2) & 0x1FFFFF);
+ goto write_done;
+ }
+ return;
+
+ case BFD_RELOC_SW_64_BR26:
+ if (fixP->fx_pcrel == 0 && fixP->fx_addsy == 0)
+ {
+ image = bfd_getl32 (fixpos);
+ image = (image & ~0x3FFFFFF) | ((value >> 2) & 0x3FFFFFF);
+ goto write_done;
+ }
+ return;
+
+ case BFD_RELOC_SW_64_HINT:
+ if (fixP->fx_pcrel == 0 && fixP->fx_addsy == 0)
+ {
+ image = bfd_getl32 (fixpos);
+	  /* SW6 uses a 16-bit hint displacement, not 14.  */
+ image = (image & ~0xFFFF) | ((value >> 2) & 0xFFFF);
+ goto write_done;
+ }
+ return;
+
+#ifdef OBJ_ELF
+ case BFD_RELOC_SW_64_BRSGP:
+ return;
+
+ case BFD_RELOC_SW_64_TLSGD:
+ case BFD_RELOC_SW_64_TLSLDM:
+ case BFD_RELOC_SW_64_GOTDTPREL16:
+ case BFD_RELOC_SW_64_DTPREL_HI16:
+ case BFD_RELOC_SW_64_DTPREL_LO16:
+ case BFD_RELOC_SW_64_DTPREL16:
+ case BFD_RELOC_SW_64_GOTTPREL16:
+ case BFD_RELOC_SW_64_TPREL_HI16:
+ case BFD_RELOC_SW_64_TPREL_LO16:
+ case BFD_RELOC_SW_64_TPREL16:
+ if (fixP->fx_addsy)
+ S_SET_THREAD_LOCAL (fixP->fx_addsy);
+ return;
+#endif
+
+#ifdef OBJ_ECOFF
+ case BFD_RELOC_SW_64_LITERAL:
+ md_number_to_chars (fixpos, value, 2);
+ return;
+#endif
+ case BFD_RELOC_SW_64_ELF_LITERAL:
+ case BFD_RELOC_SW_64_ELF_LITERAL_GOT:
+ case BFD_RELOC_SW_64_LITUSE:
+ case BFD_RELOC_SW_64_LINKAGE:
+ case BFD_RELOC_SW_64_CODEADDR:
+ case BFD_RELOC_SW_64_TLSREL_GOT:
+ return;
+
+#ifdef OBJ_EVAX
+ case BFD_RELOC_SW_64_NOP:
+ value -= (8 + 4); /* PC-relative, base is call+4. */
+
+ /* From B.4.5.2 of the OpenVMS Linker Utility Manual:
+ "Finally, the ETIR$C_STC_BSR command passes the same address
+ as ETIR$C_STC_NOP (so that they will fail or succeed together),
+ and the same test is done again." */
+ if (S_GET_SEGMENT (fixP->fx_addsy) == undefined_section)
+ {
+ fixP->fx_addnumber = -value;
+ return;
+ }
+ if (value + (1u << 22) >= (1u << 23))
+ goto done;
+ else
+ {
+ /* Change to a nop. */
+ image = 0x47FF041F;
+ goto write_done;
+ }
+
+ case BFD_RELOC_SW_64_LDA:
+ /* fixup_segment sets fixP->fx_addsy to NULL when it can pre-compute
+ the value for an O_subtract. */
+ if (fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) == undefined_section)
+ {
+ fixP->fx_addnumber = symbol_get_bfdsym (fixP->fx_subsy)->value;
+ return;
+ }
+ if (value + (1u << 15) >= (1u << 16))
+ goto done;
+ else
+ {
+ /* Change to an ldi. */
+ image = 0x237B0000 | (value & 0xFFFF);
+ goto write_done;
+ }
+
+ case BFD_RELOC_SW_64_BSR:
+ case BFD_RELOC_SW_64_BOH:
+ value -= 4; /* PC-relative, base is call+4. */
+
+ /* See comment in the BFD_RELOC_SW_64_NOP case above. */
+ if (S_GET_SEGMENT (fixP->fx_addsy) == undefined_section)
+ {
+ fixP->fx_addnumber = -value;
+ return;
+ }
+ if (value + (1u << 22) >= (1u << 23))
+ {
+ /* Out of range. */
+ if (fixP->fx_r_type == BFD_RELOC_SW_64_BOH)
+ {
+ /* Add a hint. */
+ image = bfd_getl32 (fixpos);
+ image = (image & ~0x3FFF) | ((value >> 2) & 0x3FFF);
+ goto write_done;
+ }
+ goto done;
+ }
+ else
+ {
+ /* Change to a branch. */
+ image = 0xD3400000 | ((value >> 2) & 0x1FFFFF);
+ goto write_done;
+ }
+#endif
+
+ case BFD_RELOC_VTABLE_INHERIT:
+ case BFD_RELOC_VTABLE_ENTRY:
+ return;
+
+ default: {
+ const struct sw_64_operand *operand;
+
+ if ((int) fixP->fx_r_type >= 0)
+ as_fatal (_ ("unhandled relocation type %s"),
+ bfd_get_reloc_code_name (fixP->fx_r_type));
+
+ gas_assert (-(int) fixP->fx_r_type < (int) sw_64_num_operands);
+ operand = &sw_64_operands[-(int) fixP->fx_r_type];
+
+ /* The rest of these fixups only exist internally during symbol
+ resolution and have no representation in the object file.
+ Therefore they must be completely resolved as constants. */
+
+ if (fixP->fx_addsy != 0
+ && S_GET_SEGMENT (fixP->fx_addsy) != absolute_section)
+ as_bad_where (fixP->fx_file, fixP->fx_line,
+ _ ("non-absolute expression in constant field"));
+
+ image = bfd_getl32 (fixpos);
+ image = insert_operand (image, operand, (offsetT) value, fixP->fx_file,
+ fixP->fx_line);
+ }
+ goto write_done;
+ }
+
+ if (fixP->fx_addsy != 0 || fixP->fx_pcrel != 0)
+ return;
+ else
+ {
+ as_warn_where (fixP->fx_file, fixP->fx_line, _ ("type %d reloc done?\n"),
+ (int) fixP->fx_r_type);
+ goto done;
+ }
+
+write_done:
+ md_number_to_chars (fixpos, image, 4);
+
+done:
+ fixP->fx_done = 1;
+}
+
+/* Look for a register name in the given symbol. */
+
+symbolS *
+md_undefined_symbol (char *name)
+{
+ if (*name == '$')
+ {
+ int is_float = 0, num;
+
+ switch (*++name)
+ {
+ case 'f':
+ if (name[1] == 'p' && name[2] == '\0')
+ return sw_64_register_table[AXP_REG_FP];
+ is_float = 32;
+ /* Fall through. */
+
+ case 'r':
+ if (!ISDIGIT (*++name))
+ break;
+ /* Fall through. */
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ if (name[1] == '\0')
+ num = name[0] - '0';
+ else if (name[0] != '0' && ISDIGIT (name[1]) && name[2] == '\0')
+ {
+ num = (name[0] - '0') * 10 + name[1] - '0';
+ if (num >= 32)
+ break;
+ }
+ else
+ break;
+
+ if (!sw_64_noat_on && (num + is_float) == AXP_REG_AT)
+ as_warn (_ ("Used $at without \".set noat\""));
+ return sw_64_register_table[num + is_float];
+
+ case 'a':
+ if (name[1] == 't' && name[2] == '\0')
+ {
+ if (!sw_64_noat_on)
+ as_warn (_ ("Used $at without \".set noat\""));
+ return sw_64_register_table[AXP_REG_AT];
+ }
+ break;
+
+ case 'g':
+ if (name[1] == 'p' && name[2] == '\0')
+ return sw_64_register_table[sw_64_gp_register];
+ break;
+
+ case 's':
+ if (name[1] == 'p' && name[2] == '\0')
+ return sw_64_register_table[AXP_REG_SP];
+ break;
+ }
+ }
+ return NULL;
+}
+
+#ifdef OBJ_ECOFF
+/* @@@ Magic ECOFF bits. */
+
+void
+sw_64_frob_ecoff_data (void)
+{
+ select_gp_value ();
+ /* $zero and $f31 are read-only. */
+ sw_64_gprmask &= ~1;
+ sw_64_fprmask &= ~1;
+}
+#endif
+
+/* Hook to remember a recently defined label so that the auto-align
+ code can adjust the symbol after we know what alignment will be
+ required. */
+
+void
+sw_64_define_label (symbolS *sym)
+{
+ sw_64_insn_label = sym;
+#ifdef OBJ_ELF
+ dwarf2_emit_label (sym);
+#endif
+}
+
+/* Return true if we must always emit a reloc for a type and false if
+ there is some hope of resolving it at assembly time. */
+
+int
+sw_64_force_relocation (fixS *f)
+{
+ if (sw_64_flag_relax)
+ return 1;
+
+ switch (f->fx_r_type)
+ {
+ case BFD_RELOC_SW_64_GPDISP_HI16:
+ case BFD_RELOC_SW_64_GPDISP_LO16:
+ case BFD_RELOC_SW_64_GPDISP:
+ case BFD_RELOC_SW_64_LITERAL:
+ case BFD_RELOC_SW_64_ELF_LITERAL:
+ case BFD_RELOC_SW_64_ELF_LITERAL_GOT:
+ case BFD_RELOC_SW_64_LITUSE:
+ case BFD_RELOC_GPREL16:
+ case BFD_RELOC_GPREL32:
+ case BFD_RELOC_SW_64_GPREL_HI16:
+ case BFD_RELOC_SW_64_GPREL_LO16:
+ case BFD_RELOC_SW_64_LINKAGE:
+ case BFD_RELOC_SW_64_CODEADDR:
+ case BFD_RELOC_SW_64_BRSGP:
+ case BFD_RELOC_SW_64_TLSGD:
+ case BFD_RELOC_SW_64_TLSLDM:
+ case BFD_RELOC_SW_64_GOTDTPREL16:
+ case BFD_RELOC_SW_64_DTPREL_HI16:
+ case BFD_RELOC_SW_64_DTPREL_LO16:
+ case BFD_RELOC_SW_64_DTPREL16:
+ case BFD_RELOC_SW_64_GOTTPREL16:
+ case BFD_RELOC_SW_64_TPREL_HI16:
+ case BFD_RELOC_SW_64_TPREL_LO16:
+ case BFD_RELOC_SW_64_TPREL16:
+#ifdef OBJ_EVAX
+ case BFD_RELOC_SW_64_NOP:
+ case BFD_RELOC_SW_64_BSR:
+ case BFD_RELOC_SW_64_LDA:
+ case BFD_RELOC_SW_64_BOH:
+#endif
+ return 1;
+
+ default:
+ break;
+ }
+
+ return generic_force_reloc (f);
+}
+
+/* Return true if we can partially resolve a relocation now. */
+
+int
+sw_64_fix_adjustable (fixS *f)
+{
+ /* Are there any relocation types for which we must generate a
+ reloc but we can adjust the values contained within it? */
+ switch (f->fx_r_type)
+ {
+ case BFD_RELOC_SW_64_GPDISP_HI16:
+ case BFD_RELOC_SW_64_GPDISP_LO16:
+ case BFD_RELOC_SW_64_GPDISP:
+ return 0;
+
+ case BFD_RELOC_SW_64_LITERAL:
+ case BFD_RELOC_SW_64_ELF_LITERAL:
+ case BFD_RELOC_SW_64_ELF_LITERAL_GOT:
+ case BFD_RELOC_SW_64_LITUSE:
+ case BFD_RELOC_SW_64_LINKAGE:
+ case BFD_RELOC_SW_64_CODEADDR:
+ case BFD_RELOC_SW_64_TLSREL_GOT:
+ return 1;
+
+ case BFD_RELOC_VTABLE_ENTRY:
+ case BFD_RELOC_VTABLE_INHERIT:
+ return 0;
+
+ case BFD_RELOC_GPREL16:
+ case BFD_RELOC_GPREL32:
+ case BFD_RELOC_SW_64_GPREL_HI16:
+ case BFD_RELOC_SW_64_GPREL_LO16:
+ case BFD_RELOC_23_PCREL_S2:
+ case BFD_RELOC_SW_64_BR26:
+ case BFD_RELOC_16:
+ case BFD_RELOC_32:
+ case BFD_RELOC_64:
+ case BFD_RELOC_SW_64_HINT:
+ return 1;
+
+ case BFD_RELOC_SW_64_TLSGD:
+ case BFD_RELOC_SW_64_TLSLDM:
+ case BFD_RELOC_SW_64_GOTDTPREL16:
+ case BFD_RELOC_SW_64_DTPREL_HI16:
+ case BFD_RELOC_SW_64_DTPREL_LO16:
+ case BFD_RELOC_SW_64_DTPREL16:
+ case BFD_RELOC_SW_64_GOTTPREL16:
+ case BFD_RELOC_SW_64_TPREL_HI16:
+ case BFD_RELOC_SW_64_TPREL_LO16:
+ case BFD_RELOC_SW_64_TPREL16:
+ /* ??? No idea why we can't return a reference to .tbss+10, but
+ we're preventing this in the other assemblers. Follow for now. */
+ return 0;
+
+#ifdef OBJ_ELF
+ case BFD_RELOC_SW_64_BRSGP:
+ /* If we have a BRSGP reloc to a local symbol, adjust it to BRADDR and
+ let it get resolved at assembly time. */
+ {
+ symbolS *sym = f->fx_addsy;
+ const char *name;
+ int offset = 0;
+
+ if (generic_force_reloc (f))
+ return 0;
+
+ switch (S_GET_OTHER (sym) & STO_SW_64_STD_GPLOAD)
+ {
+ case STO_SW_64_NOPV:
+ break;
+ case STO_SW_64_STD_GPLOAD:
+ offset = 8;
+ break;
+ default:
+ if (S_IS_LOCAL (sym))
+ name = "<local>";
+ else
+ name = S_GET_NAME (sym);
+ as_bad_where (
+ f->fx_file, f->fx_line,
+ _ ("!samegp reloc against symbol without .prologue: %s"), name);
+ break;
+ }
+ f->fx_r_type = BFD_RELOC_23_PCREL_S2;
+ f->fx_offset += offset;
+ return 1;
+ }
+#endif
+#ifdef OBJ_EVAX
+ case BFD_RELOC_SW_64_NOP:
+ case BFD_RELOC_SW_64_BSR:
+ case BFD_RELOC_SW_64_LDA:
+ case BFD_RELOC_SW_64_BOH:
+ return 1;
+#endif
+
+ default:
+ return 1;
+ }
+}
+
+/* Generate the BFD reloc to be stuck in the object file from the
+ fixup used internally in the assembler. */
+
+arelent *
+tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
+{
+ arelent *reloc;
+
+ reloc = XNEW (arelent);
+ reloc->sym_ptr_ptr = XNEW (asymbol *);
+ *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
+ reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
+
+ /* Make sure none of our internal relocations make it this far.
+ They'd better have been fully resolved by this point. */
+ gas_assert ((int) fixp->fx_r_type > 0);
+
+ reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
+ if (reloc->howto == NULL)
+ {
+ as_bad_where (fixp->fx_file, fixp->fx_line,
+ _ ("cannot represent `%s' relocation in object file"),
+ bfd_get_reloc_code_name (fixp->fx_r_type));
+ return NULL;
+ }
+
+ if (!fixp->fx_pcrel != !reloc->howto->pc_relative)
+ as_fatal (_ ("internal error? cannot generate `%s' relocation"),
+ bfd_get_reloc_code_name (fixp->fx_r_type));
+
+ gas_assert (!fixp->fx_pcrel == !reloc->howto->pc_relative);
+
+ reloc->addend = fixp->fx_offset;
+
+#ifdef OBJ_ECOFF
+ /* Fake out bfd_perform_relocation. sigh. */
+ /* ??? Better would be to use the special_function hook. */
+ if (fixp->fx_r_type == BFD_RELOC_SW_64_LITERAL)
+ reloc->addend = -sw_64_gp_value;
+#endif
+
+#ifdef OBJ_EVAX
+ switch (fixp->fx_r_type)
+ {
+ struct evax_private_udata_struct *udata;
+ const char *pname;
+ int pname_len;
+
+ case BFD_RELOC_SW_64_LINKAGE:
+ /* Copy the linkage index. */
+ reloc->addend = fixp->fx_addnumber;
+ break;
+
+ case BFD_RELOC_SW_64_NOP:
+ case BFD_RELOC_SW_64_BSR:
+ case BFD_RELOC_SW_64_LDA:
+ case BFD_RELOC_SW_64_BOH:
+ pname = symbol_get_bfdsym (fixp->fx_addsy)->name;
+
+ /* We need the non-suffixed name of the procedure. Beware that
+ the main symbol might be equated so look it up and take its name. */
+ pname_len = strlen (pname);
+ if (pname_len > 4 && strcmp (pname + pname_len - 4, "..en") == 0)
+ {
+ symbolS *sym;
+ char *my_pname = xmemdup0 (pname, pname_len - 4);
+ sym = symbol_find (my_pname);
+ free (my_pname);
+ if (sym == NULL)
+ abort ();
+
+ while (symbol_equated_reloc_p (sym))
+ {
+ symbolS *n = symbol_get_value_expression (sym)->X_add_symbol;
+
+ /* We must avoid looping, as that can occur with a badly
+ written program. */
+ if (n == sym)
+ break;
+ sym = n;
+ }
+ pname = symbol_get_bfdsym (sym)->name;
+ }
+
+ udata = XNEW (struct evax_private_udata_struct);
+ udata->enbsym = symbol_get_bfdsym (fixp->fx_addsy);
+ udata->bsym = symbol_get_bfdsym (fixp->tc_fix_data.info->psym);
+ udata->origname = (char *) pname;
+ udata->lkindex = ((struct evax_private_udata_struct *) symbol_get_bfdsym (
+ fixp->tc_fix_data.info->sym)
+ ->udata.p)
+ ->lkindex;
+ reloc->sym_ptr_ptr = (void *) udata;
+ reloc->addend = fixp->fx_addnumber;
+
+ default:
+ break;
+ }
+#endif
+
+ return reloc;
+}
+
+/* Parse a register name off of the input_line and return a register
+ number. Gets md_undefined_symbol above to do the register name
+ matching for us.
+
+ Only called as a part of processing the ECOFF .frame directive. */
+
+int
+tc_get_register (int frame ATTRIBUTE_UNUSED)
+{
+ int framereg = AXP_REG_SP;
+
+ SKIP_WHITESPACE ();
+ if (*input_line_pointer == '$')
+ {
+ char *s;
+ char c = get_symbol_name (&s);
+ symbolS *sym = md_undefined_symbol (s);
+
+ *strchr (s, '\0') = c;
+ if (sym && (framereg = S_GET_VALUE (sym)) <= 31)
+ goto found;
+ }
+ as_warn (_ ("frame reg expected, using $%d."), framereg);
+
+found:
+ note_gpreg (framereg);
+ return framereg;
+}
+
+/* This is called before the symbol table is processed. In order to
+ work with gcc when using mips-tfile, we must keep all local labels.
+ However, in other cases, we want to discard them. If we were
+ called with -g, but we didn't see any debugging information, it may
+ mean that gcc is smuggling debugging information through to
+ mips-tfile, in which case we must generate all local labels. */
+
+#ifdef OBJ_ECOFF
+
+void
+sw_64_frob_file_before_adjust (void)
+{
+ if (sw_64_debug != 0 && !ecoff_debugging_seen)
+ flag_keep_locals = 1;
+}
+
+#endif /* OBJ_ECOFF */
+
+/* Set up globals to generate code for the ISA or processor
+ * described by INFO. */
+
+static void
+sw_64_set_architecture (const struct sw_64_cpu_info *info)
+{
+ if (info != 0)
+ {
+ file_sw_64_arch = info->cpu;
+ sw_64_opts.arch = info->cpu;
+ sw_64_opts.isa = info->isa;
+ }
+}
+
+void
+sw_64_after_parse_args (void)
+{
+  const struct sw_64_cpu_info *arch_info = 0;
+
+ const struct cpu_type *p;
+ if (sw_64_target_name == NULL)
+ {
+ for (p = cpu_types; p->name; ++p)
+ if (strcmp (SW_CPU_STRING_DEFAULT, p->name) == 0)
+ {
+ sw_64_target_name = p->name, sw_64_target = p->flags;
+ goto found;
+ }
+ as_bad (_ ("Unknown CPU default name `%s'"), SW_CPU_STRING_DEFAULT);
+ found:
+ gas_assert (sw_64_target);
+ }
+ sw_64_set_architecture (arch_info);
+}
+
+static const struct sw_64_cpu_info sw_64_cpu_info_table[] = {
+ /* Entries for generic ISAs. */
+ {NULL, 0, 0, 0, 0}};
+
+/* The Sw_64 has support for some VAX floating point types, as well as for
+ IEEE floating point. We consider IEEE to be the primary floating point
+ format, and sneak in the VAX floating point support here. */
+#include "config/atof-vax.c"
diff --git a/gas/config/tc-sw_64.h b/gas/config/tc-sw_64.h
new file mode 100644
index 00000000..c3acee56
--- /dev/null
+++ b/gas/config/tc-sw_64.h
@@ -0,0 +1,206 @@
+/* This file is tc-sw_64.h
+ Copyright (C) 1994-2023 Free Software Foundation, Inc.
+ Written by Ken Raeburn <raeburn@cygnus.com>.
+
+ This file is part of GAS, the GNU Assembler.
+
+ GAS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GAS is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GAS; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#define TC_SW_64
+
+#define TARGET_BYTES_BIG_ENDIAN 0
+
+#define WORKING_DOT_WORD
+
+#define TARGET_ARCH bfd_arch_sw_64
+
+#ifdef TE_FreeBSD
+#define ELF_TARGET_FORMAT "elf64-sw_64-freebsd"
+#endif
+#ifndef ELF_TARGET_FORMAT
+#define ELF_TARGET_FORMAT "elf64-sw_64"
+#endif
+
+#define TARGET_FORMAT \
+ (OUTPUT_FLAVOR == bfd_target_ecoff_flavour ? "ecoff-littlesw_64" \
+ : OUTPUT_FLAVOR == bfd_target_elf_flavour ? ELF_TARGET_FORMAT \
+ : OUTPUT_FLAVOR == bfd_target_evax_flavour ? "vms-sw_64" \
+ : "unknown-format")
+
+#define NEED_LITERAL_POOL
+#define REPEAT_CONS_EXPRESSIONS
+
+struct fix;
+struct sw_64_reloc_tag;
+
+extern int
+sw_64_force_relocation (struct fix *);
+extern int
+sw_64_fix_adjustable (struct fix *);
+
+extern unsigned long sw_64_gprmask, sw_64_fprmask;
+extern valueT sw_64_gp_value;
+
+#define TC_FORCE_RELOCATION(FIX) sw_64_force_relocation (FIX)
+#define tc_fix_adjustable(FIX) sw_64_fix_adjustable (FIX)
+#define RELOC_REQUIRES_SYMBOL
+
+/* Values passed to md_apply_fix don't include the symbol value. */
+#define MD_APPLY_SYM_VALUE(FIX) 0
+
+#define md_convert_frag(b, s, f) as_fatal ("sw_64 convert_frag\n")
+#define md_estimate_size_before_relax(f, s) \
+ (as_fatal ("estimate_size_before_relax called"), 1)
+#define md_operand(x)
+
+#ifdef OBJ_EVAX
+#define TC_VALIDATE_FIX_SUB(FIX, SEG) 1
+
+#define tc_canonicalize_symbol_name evax_shorten_name
+
+#define TC_CONS_FIX_NEW(FRAG, OFF, LEN, EXP, RELOC) \
+ (void) RELOC, fix_new_exp (FRAG, OFF, (int) LEN, EXP, 0, \
+ LEN == 2 ? BFD_RELOC_16 \
+ : LEN == 4 ? BFD_RELOC_32 \
+ : LEN == 8 ? BFD_RELOC_64 \
+ : BFD_RELOC_SW_64_LINKAGE);
+#endif
+
+#ifdef OBJ_EVAX
+#define TC_IMPLICIT_LCOMM_ALIGNMENT(SIZE, P2VAR) (P2VAR) = 3
+#else
+#define TC_IMPLICIT_LCOMM_ALIGNMENT(size, align) \
+ do \
+ { \
+ align = 0; \
+ if (size > 1) \
+ { \
+ addressT temp = 1; \
+ while ((size & temp) == 0) \
+ ++align, temp <<= 1; \
+ } \
+ } while (0)
+#endif
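For reference, the non-EVAX branch of TC_IMPLICIT_LCOMM_ALIGNMENT above derives the alignment as the number of trailing zero bits in the requested size. A stand-alone sketch of the same computation follows (illustrative only, not part of the patch; the helper name is made up):

```c
#include <assert.h>

/* Mirror of the macro above: the alignment is the number of trailing
   zero bits in SIZE, i.e. log2 of the largest power of two dividing it;
   a size of 1, or any odd size, gets alignment 0.  */
static unsigned int
implicit_lcomm_alignment (unsigned long size)
{
  unsigned int align = 0;
  if (size > 1)
    {
      unsigned long temp = 1;
      while ((size & temp) == 0)
        ++align, temp <<= 1;
    }
  return align;
}

int
main (void)
{
  assert (implicit_lcomm_alignment (1) == 0);
  assert (implicit_lcomm_alignment (6) == 1);   /* 6 = 2 * 3 */
  assert (implicit_lcomm_alignment (8) == 3);   /* 8-byte object -> 2^3 */
  assert (implicit_lcomm_alignment (48) == 4);  /* 48 = 16 * 3 */
  return 0;
}
```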
+
+#define md_number_to_chars number_to_chars_littleendian
+
+extern int
+tc_get_register (int);
+extern void
+sw_64_frob_ecoff_data (void);
+
+#define tc_frob_label(sym) sw_64_define_label (sym)
+extern void
+sw_64_define_label (symbolS *);
+
+#define md_cons_align(nbytes) sw_64_cons_align (nbytes)
+extern void
+sw_64_cons_align (int);
+
+#define HANDLE_ALIGN(fragp) sw_64_handle_align (fragp)
+extern void
+sw_64_handle_align (struct frag *);
+
+#define MAX_MEM_FOR_RS_ALIGN_CODE (3 + 4 + 8)
+
+#ifdef OBJ_ECOFF
+#define tc_frob_file_before_adjust() sw_64_frob_file_before_adjust ()
+extern void
+sw_64_frob_file_before_adjust (void);
+
+#define TC_VALIDATE_FIX_SUB(FIX, SEG) \
+ ((md_register_arithmetic || (SEG) != reg_section) \
+ && ((FIX)->fx_r_type == BFD_RELOC_GPREL32 \
+ || (FIX)->fx_r_type == BFD_RELOC_GPREL16))
+#endif
+
+#define md_after_parse_args() sw_64_after_parse_args ()
+extern void
+sw_64_after_parse_args (void);
+
+#define DIFF_EXPR_OK /* foo-. gets turned into PC relative relocs. */
+
+#ifdef OBJ_ELF
+#define md_elf_section_letter sw_64_elf_section_letter
+extern bfd_vma
+sw_64_elf_section_letter (int, const char **);
+#define md_elf_section_flags sw_64_elf_section_flags
+extern flagword
+sw_64_elf_section_flags (flagword, bfd_vma, int);
+#endif
+
+/* Whether to add support for explicit !relocation_op!sequence_number. At the
+ moment, only do this for ELF, though ECOFF could use it as well. */
+
+#ifdef OBJ_ELF
+#define RELOC_OP_P
+#endif
+
+#ifndef OBJ_EVAX
+/* Before the relocations are written, reorder them, so that user
+ supplied !lituse relocations follow the appropriate !literal
+ relocations. Also convert the gas-internal relocations to the
+ appropriate linker relocations. */
+#define tc_frob_file_before_fix() sw_64_before_fix ()
+extern void
+sw_64_before_fix (void);
+#endif
+
+#ifdef OBJ_ELF
+#define md_finish sw_64_elf_md_finish
+extern void
+sw_64_elf_md_finish (void);
+#endif
+
+/* New fields for supporting explicit relocations (such as !literal to mark
+ where a pointer is loaded from the global table, and !lituse_base to track
+ all of the normal uses of that pointer). */
+
+#define TC_FIX_TYPE struct sw_64_fix_tag
+
+struct sw_64_fix_tag
+{
+ struct fix *next_reloc; /* Next !lituse or !gpdisp. */
+ struct sw_64_reloc_tag *info; /* Other members with same sequence. */
+};
+
+/* Initialize the TC_FIX_TYPE field. */
+#define TC_INIT_FIX_DATA(FIX) \
+ do \
+ { \
+ FIX->tc_fix_data.next_reloc = NULL; \
+ FIX->tc_fix_data.info = NULL; \
+ } while (0)
+
+/* Work with DEBUG5 to print fields in tc_fix_type. */
+#define TC_FIX_DATA_PRINT(STREAM, FIX) \
+ do \
+ { \
+ if (FIX->tc_fix_data.info) \
+ fprintf (STREAM, "\tinfo = 0x%lx, next_reloc = 0x%lx\n", \
+ (long) FIX->tc_fix_data.info, \
+ (long) FIX->tc_fix_data.next_reloc); \
+ } while (0)
+
+#define TARGET_USE_CFIPOP 1
+
+#define tc_cfi_frame_initial_instructions sw_64_cfi_frame_initial_instructions
+extern void
+sw_64_cfi_frame_initial_instructions (void);
+
+#define DWARF2_LINE_MIN_INSN_LENGTH 4
+#define DWARF2_DEFAULT_RETURN_COLUMN 26
+#define DWARF2_CIE_DATA_ALIGNMENT (-8)
diff --git a/gas/configure b/gas/configure
index 1fc3a54b..5ce68a75 100755
--- a/gas/configure
+++ b/gas/configure
@@ -12260,6 +12260,44 @@ _ACEOF
;;
esac
+ case ${cpu_type} in
+ sw_64 | sw_64sw*)
+ # Set sw_cpu to the name of the default CPU.
+ sw_cpu=$with_cpu
+ cpu_types=${with_cpu:2:4}
+cat >>confdefs.h <<_ACEOF
+#define SW_CPU_STRING_DEFAULT "${with_cpu}"
+_ACEOF
+
+# Record the current git revision, if building from a git checkout.
+git_version=`git log -1 --format="%h"`
+git_short=${git_version:0:5}${cpu_types}"9"
+cat >> confdefs.h <<_ACEOF
+#define GIT_REVISION 0x$git_short
+#define TARGET_SW_64
+_ACEOF
+ ;;
+ esac
+ case ${cpu_type} in
+ sw_64)
+ # Set sw_cpu to the name of the default CPU.
+ case ${target_cpu} in
+ sw_64sw6a)
+ sw_cpu=sw6a
+ ;;
+ sw_64sw6b)
+ sw_cpu=sw6b
+ ;;
+ sw_64sw8a)
+ sw_cpu=sw8a
+ ;;
+ esac
+
+cat >>confdefs.h <<_ACEOF
+_ACEOF
+ ;;
+ esac
+
# Do we need the opcodes library?
case ${cpu_type} in
vax | tic30 | i386 | arm)
diff --git a/gas/configure.tgt b/gas/configure.tgt
index 3429f850..562a938e 100644
--- a/gas/configure.tgt
+++ b/gas/configure.tgt
@@ -49,6 +49,7 @@ case ${cpu} in
aarch64) cpu_type=aarch64 endian=little arch=aarch64;;
aarch64_be) cpu_type=aarch64 endian=big arch=aarch64;;
alpha*) cpu_type=alpha ;;
+ sw_64*) cpu_type=sw_64 ;;
am33_2.0) cpu_type=mn10300 endian=little ;;
arc*eb) cpu_type=arc endian=big ;;
arm*be|arm*b) cpu_type=arm endian=big ;;
@@ -143,6 +144,12 @@ case ${generic_target} in
alpha-*-linux-*) fmt=elf em=linux ;;
alpha-*-netbsd* | alpha-*-openbsd*) fmt=elf em=nbsd ;;
+ sw_64-*-osf*) fmt=ecoff ;;
+ sw_64-*-linux*ecoff*) fmt=ecoff ;;
+ sw_64-*-linux-*) fmt=elf em=linux ;;
+ sw_64-*-netbsd*) fmt=elf em=nbsd ;;
+ sw_64-*-openbsd*) fmt=elf em=obsd ;;
+
arc-*-elf*) fmt=elf ;;
arc*-*-linux*) fmt=elf bfd_gas=yes ;;
@@ -448,7 +455,8 @@ esac
case ${cpu_type} in
aarch64 | alpha | arm | csky | i386 | ia64 | microblaze | mips | ns32k | \
- or1k | or1knd | pdp11 | ppc | riscv | sh | sparc | z80 | z8k | loongarch)
+ or1k | or1knd | pdp11 | ppc | riscv | sh | sparc | z80 | z8k | loongarch | \
+ sw_64)
bfd_gas=yes
;;
esac
diff --git a/gas/read.c b/gas/read.c
index 826156db..c0c0cc4c 100644
--- a/gas/read.c
+++ b/gas/read.c
@@ -2472,7 +2472,7 @@ bss_alloc (symbolS *symbolP, addressT size, unsigned int align)
subsegT current_subseg = now_subseg;
segT bss_seg = bss_section;
-#if defined (TC_MIPS) || defined (TC_ALPHA)
+#if defined (TC_MIPS) || defined (TC_ALPHA) || defined (TC_SW_64)
if (OUTPUT_FLAVOR == bfd_target_ecoff_flavour
|| OUTPUT_FLAVOR == bfd_target_elf_flavour)
{
diff --git a/gas/testsuite/gas/all/gas.exp b/gas/testsuite/gas/all/gas.exp
index bab5a6c7..1db165f1 100644
--- a/gas/testsuite/gas/all/gas.exp
+++ b/gas/testsuite/gas/all/gas.exp
@@ -62,6 +62,7 @@ if { ![istarget cris-*-*] && ![istarget crisv32-*-*]
# differences of two previously undefined symbols. Hence this test will
# not pass for these targets.
if { ![istarget alpha*-*-*vms*]
+ && ![istarget sw_64*-*-*vms*]
&& ![istarget am3*-*-*]
&& ![istarget avr-*-*]
&& ![istarget ft32-*-*]
@@ -126,6 +127,8 @@ if { ![is_aout_format] } {
}
alpha-*-*linux*ecoff { }
alpha-*-osf* { }
+ sw_64-*-*linux*ecoff { }
+ sw_64-*-osf* { }
hppa*-*-hpux* { }
mep-*-* { }
mmix-*-* { }
@@ -145,6 +148,7 @@ if { ![is_aout_format] } {
# pdp11 gets unexpected reloc types.
switch -glob $target_triplet {
alpha*-*-* { }
+ sw_64*-*-* { }
am3*-*-* { }
cr16*-*-* { }
crx*-*-* { }
@@ -364,6 +368,8 @@ proc test_cond {} {
switch -glob $target_triplet {
alpha-*-linux*ecoff { }
alpha-*-osf* { }
+ sw_64-*-linux*ecoff { }
+ sw_64-*-osf* { }
hppa*-*-* { }
*c4x*-*-* { }
*c54x*-*-* { }
@@ -441,6 +447,7 @@ if { ![istarget "pdp11-*-*"] } {
# .set works differently on some targets.
switch -glob $target_triplet {
alpha*-*-* { }
+ sw_64*-*-* { }
mips*-*-* { }
*c54x*-*-* { }
z80-*-* { }
diff --git a/gas/testsuite/gas/elf/common5a.d b/gas/testsuite/gas/elf/common5a.d
index ec136b48..d5aaac4a 100644
--- a/gas/testsuite/gas/elf/common5a.d
+++ b/gas/testsuite/gas/elf/common5a.d
@@ -1,5 +1,5 @@
#source: common5a.s
#as:
#error_output: common5a.l
-#notarget: alpha-*-*
+#notarget: alpha-*-* sw_64-*-*
# The Alpha target uses its own .set pseudo-insn.
diff --git a/gas/testsuite/gas/elf/common5b.d b/gas/testsuite/gas/elf/common5b.d
index 9369c2df..a0e3a2ff 100644
--- a/gas/testsuite/gas/elf/common5b.d
+++ b/gas/testsuite/gas/elf/common5b.d
@@ -1,5 +1,5 @@
#source: common5b.s
#as:
#error_output: common5b.l
-#notarget: alpha-*-*
+#notarget: alpha-*-* sw_64-*-*
# The Alpha target uses its own .set pseudo-insn.
diff --git a/gas/testsuite/gas/elf/common5c.d b/gas/testsuite/gas/elf/common5c.d
index cbb3fc68..720f464f 100644
--- a/gas/testsuite/gas/elf/common5c.d
+++ b/gas/testsuite/gas/elf/common5c.d
@@ -1,5 +1,5 @@
#source: common5c.s
#as:
#error_output: common5a.l
-#notarget: alpha-*-*
+#notarget: alpha-*-* sw_64-*-*
# The Alpha target uses its own .set pseudo-insn.
diff --git a/gas/testsuite/gas/elf/common5d.d b/gas/testsuite/gas/elf/common5d.d
index 7b11fa24..b922fa59 100644
--- a/gas/testsuite/gas/elf/common5d.d
+++ b/gas/testsuite/gas/elf/common5d.d
@@ -1,5 +1,5 @@
#source: common5d.s
#as:
#error_output: common5b.l
-#notarget: alpha-*-*
+#notarget: alpha-*-* sw_64-*-*
# The Alpha target uses its own .set pseudo-insn.
diff --git a/gas/testsuite/gas/elf/elf.exp b/gas/testsuite/gas/elf/elf.exp
index 9e389ff1..7cd49c0e 100644
--- a/gas/testsuite/gas/elf/elf.exp
+++ b/gas/testsuite/gas/elf/elf.exp
@@ -167,6 +167,7 @@ if { [is_elf_format] } then {
}
switch -glob $target_triplet {
alpha*-*-* { }
+ sw_64*-*-* { }
am3*-*-* { }
*c54x*-*-* { }
cr16*-*-* { }
@@ -220,7 +221,7 @@ if { [is_elf_format] } then {
run_elf_list_test "section5" "" "-al -Z" "-SW" "| grep \" \\\\.test\\\[0-9\\\]\""
}
run_dump_test "struct"
- if { ![istarget "alpha*-*-*"] } then {
+ if { ![istarget "alpha*-*-*"] && ! [istarget "sw_64*-*-*"]} then {
# The alpha port uses .set for state, e.g. nomacro.
run_dump_test "symtab"
}
diff --git a/gas/testsuite/gas/elf/ifunc-1.d b/gas/testsuite/gas/elf/ifunc-1.d
index 5b2657b0..f920b977 100644
--- a/gas/testsuite/gas/elf/ifunc-1.d
+++ b/gas/testsuite/gas/elf/ifunc-1.d
@@ -1,6 +1,6 @@
#readelf: -s
#name: .set with IFUNC
-#notarget: alpha*
+#notarget: alpha* sw_64*
#...
[ ]+[0-9]+:[ ]+[0-9a-f]+[ ]+[0-9]+[ ]+IFUNC[ ]+GLOBAL[ ]+DEFAULT[ ]+[1-9] __GI_foo
diff --git a/gas/testsuite/gas/elf/size.d b/gas/testsuite/gas/elf/size.d
index 5890386a..edd76de3 100644
--- a/gas/testsuite/gas/elf/size.d
+++ b/gas/testsuite/gas/elf/size.d
@@ -1,6 +1,6 @@
#readelf: -sW
#name: ELF symbol size
-#notarget: alpha-*-* hppa*-*-hpux*
+#notarget: alpha-*-* sw_64-*-* hppa*-*-hpux*
# The Alpha target uses its own .set pseudo-insn.
#...
diff --git a/gas/write.c b/gas/write.c
index 573a667d..6ae9856d 100644
--- a/gas/write.c
+++ b/gas/write.c
@@ -120,6 +120,8 @@ struct reloc_list* reloc_list;
void print_fixup (fixS *);
+fixS * frags_pre_fixup = NULL;
+
/* We generally attach relocs to frag chains. However, after we have
chained these all together into a segment, any relocs we add after
that must be attached to a segment. This will include relocs added
@@ -199,14 +201,21 @@ fix_new_internal (fragS *frag, /* Which frag? */
*seg_fix_rootP = fixP;
if (fixP->fx_next == NULL)
*seg_fix_tailP = fixP;
+ frags_pre_fixup = NULL;
}
else
{
fixP->fx_next = NULL;
if (*seg_fix_tailP)
+ {
+ frags_pre_fixup = *seg_fix_tailP;
(*seg_fix_tailP)->fx_next = fixP;
+ }
else
+ {
+ frags_pre_fixup = NULL;
*seg_fix_rootP = fixP;
+ }
*seg_fix_tailP = fixP;
}
}
@@ -550,6 +559,56 @@ relax_seg (bfd *abfd ATTRIBUTE_UNUSED, asection *sec, void *xxx)
info->changed = 1;
}
+#ifdef TARGET_SW_64
+static void
+sort_frchain (fragS *frag_list)
+{
+  fragS *fragp, *fragc, *fragi;
+  long r;
+  for (fragp = frag_list; fragp;)
+    {
+      fragc = fragp;
+      if (fragp->fr_next)
+	fragp = fragp->fr_next;
+      else
+	/* The end of the frag_list.  */
+	break;
+      r = (long) fragp->fr_address - (long) fragc->fr_address - fragc->fr_fix;
+      if (r < 0)
+	{
+	  /* Adjust the last fill frag's address, but don't insert it back.  */
+	  if (fragp->fr_type == rs_fill && fragp->fr_next == 0)
+	    {
+	      fragp->fr_address = fragp->last_fr_address
+		= fragc->fr_address + fragc->fr_fix + fragc->fr_offset;
+	      continue;
+	    }
+
+	  fragc->fr_next = fragp->fr_next;
+	  for (fragi = frag_list; fragi != fragc; fragi = fragi->fr_next)
+	    {
+	      r = (fragi->fr_next->fr_address
+		   - fragp->fr_address - fragp->fr_fix);
+	      if (r >= 0)
+		{
+		  fragp->fr_next = fragi->fr_next;
+		  fragi->fr_next = fragp;
+		  fragp = fragc;
+		  break;
+		}
+	    }
+	  if (fragi == fragc)
+	    as_bad_where (fragp->fr_file, fragp->fr_line,
+			  _("cannot insert frag back into the chain"));
+	}
+    }
+}
+#endif
+
static void
size_seg (bfd *abfd ATTRIBUTE_UNUSED, asection *sec, void *xxx ATTRIBUTE_UNUSED)
{
@@ -562,6 +621,10 @@ size_seg (bfd *abfd ATTRIBUTE_UNUSED, asection *sec, void *xxx ATTRIBUTE_UNUSED)
subseg_change (sec, 0);
seginfo = seg_info (sec);
+#ifdef TARGET_SW_64
+ if (pal_org_backwrards)
+ sort_frchain (seginfo->frchainP->frch_root);
+#endif
if (seginfo && seginfo->frchainP)
{
for (fragp = seginfo->frchainP->frch_root; fragp; fragp = fragp->fr_next)
@@ -3018,7 +3081,13 @@ relax_segment (struct frag *segment_frag_root, segT segment, int pass)
/* Growth may be negative, but variable part of frag
cannot have fewer than 0 chars. That is, we can't
.org backwards. */
+
+#ifdef TARGET_SW_64
+ if (((offsetT) (address + fragP->fr_fix) > target) &&
+ !pal_org_backwrards)
+#else
if ((offsetT) (address + fragP->fr_fix) > target)
+#endif
{
growth = 0;
diff --git a/gprof/sw_64.c b/gprof/sw_64.c
new file mode 100644
index 00000000..5bf726ca
--- /dev/null
+++ b/gprof/sw_64.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 1983, 1993, 1998
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "gprof.h"
+#include "search_list.h"
+#include "source.h"
+#include "symtab.h"
+#include "cg_arcs.h"
+#include "corefile.h"
+#include "hist.h"
+
+/*
+ * Opcodes of the call instructions:
+ */
+#define OP_Jxx 0x1aU
+#define OP_BSR 0x34U
+
+#define Jxx_FUNC_JMP 0U
+#define Jxx_FUNC_JSR 1U
+#define Jxx_FUNC_RET 2U
+#define Jxx_FUNC_JSR_COROUTINE 3U
+
+/* *INDENT-OFF* */
+/* Here to document only. We can't use this when cross compiling as
+ the bitfield layout might not be the same as native.
+
+ typedef union
+ {
+ struct
+ {
+ unsigned other:26;
+ unsigned op_code:6;
+ }
+ a; -- any format
+ struct
+ {
+ int disp:21;
+ unsigned ra:5;
+ unsigned op_code:6;
+ }
+ b; -- branch format
+ struct
+ {
+ int hint:14;
+ unsigned func:2;
+ unsigned rb:5;
+ unsigned ra:5;
+ unsigned op_code:6;
+ }
+ j; -- jump format
+ }
+ sw_64_Instruction;
+*/
+/* *INDENT-ON* */
+
+static Sym indirect_child;
+
+void
+sw_64_find_call (Sym *, bfd_vma, bfd_vma);
+
+/*
+ * On the SW_64 we can only detect PC relative calls, which are
+ * usually generated for calls to functions within the same
+ * object file only. This is still better than nothing, however.
+ * (In particular it should be possible to find functions that
+ * potentially call integer division routines, for example.)
+ */
+void
+sw_64_find_call (Sym *parent, bfd_vma p_lowpc, bfd_vma p_highpc)
+{
+ bfd_vma pc, dest_pc;
+ unsigned int insn;
+ Sym *child;
+
+ if (indirect_child.name == NULL)
+ {
+ sym_init (&indirect_child);
+ indirect_child.name = _ ("<indirect child>");
+ indirect_child.cg.prop.fract = 1.0;
+ indirect_child.cg.cyc.head = &indirect_child;
+ }
+
+ DBG (CALLDEBUG, printf (_ ("[find_call] %s: 0x%lx to 0x%lx\n"), parent->name,
+ (unsigned long) p_lowpc, (unsigned long) p_highpc));
+ for (pc = (p_lowpc + 3) & ~(bfd_vma) 3; pc < p_highpc; pc += 4)
+ {
+ insn = bfd_get_32 (core_bfd, ((unsigned char *) core_text_space + pc
+ - core_text_sect->vma));
+ switch (insn & (0x3fU << 26))
+ {
+ case OP_Jxx << 26:
+ /* There is no simple and reliable way to determine the
+ * target of a jsr (the hint bits help, but there aren't
+ * enough bits to get a satisfactory hit rate). Instead,
+ * for any indirect jump we simply add an arc from PARENT
+	   * to INDIRECT_CHILD---that way the user is at least able
+ * to see that there are other calls as well. */
+ if ((insn & (3 << 14)) == Jxx_FUNC_JSR << 14
+ || (insn & (3 << 14)) == Jxx_FUNC_JSR_COROUTINE << 14)
+ {
+ DBG (CALLDEBUG,
+ printf (_ ("[find_call] 0x%lx: jsr%s <indirect_child>\n"),
+ (unsigned long) pc,
+ ((insn & (3 << 14)) == Jxx_FUNC_JSR << 14
+ ? ""
+ : "_coroutine")));
+ arc_add (parent, &indirect_child, (unsigned long) 0);
+ }
+ break;
+
+ case OP_BSR << 26:
+ DBG (CALLDEBUG,
+ printf (_ ("[find_call] 0x%lx: bsr"), (unsigned long) pc));
+ /* Regular PC relative addressing. Check that this is the
+ * address of a function. The linker sometimes redirects
+ * the entry point by 8 bytes to skip loading the global
+ * pointer, so we allow for either address. */
+ dest_pc
+ = pc + 4
+ + (((bfd_signed_vma) (insn & 0x1fffff) ^ 0x100000) - 0x100000);
+ if (hist_check_address (dest_pc))
+ {
+ child = sym_lookup (&symtab, dest_pc);
+ if (child)
+ {
+ DBG (CALLDEBUG, printf (" 0x%lx\t; name=%s, addr=0x%lx",
+ (unsigned long) dest_pc, child->name,
+ (unsigned long) child->addr));
+ if (child->addr == dest_pc || child->addr == dest_pc - 8)
+ {
+ DBG (CALLDEBUG, printf ("\n"));
+ /* a hit: */
+ arc_add (parent, child, (unsigned long) 0);
+ continue;
+ }
+ }
+ }
+ /* Something funny going on. */
+ DBG (CALLDEBUG, printf ("\tbut it's a botch\n"));
+ break;
+
+ default:
+ break;
+ }
+ }
+}
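To make the field arithmetic above easier to follow: the major opcode sits in bits 26-31, the Jxx function code in bits 14-15, and the BSR displacement is a signed 21-bit field recovered with the `(x ^ 0x100000) - 0x100000` sign-extension idiom. A self-contained sketch using a made-up instruction word (illustrative only, not part of the patch):

```c
#include <stdio.h>
#include <stdint.h>

#define OP_BSR 0x34U

int
main (void)
{
  /* Hypothetical bsr word: ra = 26 and a displacement of -4
     (0x1ffffc in the low 21 bits).  */
  uint32_t insn = (OP_BSR << 26) | (26u << 21) | 0x1ffffc;

  unsigned op = (insn >> 26) & 0x3f;                /* major opcode      */
  unsigned ra = (insn >> 21) & 0x1f;                /* branch ra field   */
  int32_t disp = (int32_t) ((insn & 0x1fffff) ^ 0x100000) - 0x100000;

  /* Prints: opcode 0x34, ra 26, disp -4 */
  printf ("opcode 0x%x, ra %u, disp %d\n", op, ra, (int) disp);
  return 0;
}
```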
diff --git a/include/bfdlink.h b/include/bfdlink.h
index 840790a2..b40de75a 100644
--- a/include/bfdlink.h
+++ b/include/bfdlink.h
@@ -733,6 +733,7 @@ struct bfd_link_info
/* The maximum cache size. Backend can use cache_size and and
max_cache_size to decide if keep_memory should be honored. */
bfd_size_type max_cache_size;
+ long flag_sw_lbr;
};
/* Some forward-definitions used by some callbacks. */
diff --git a/include/coff/ecoff.h b/include/coff/ecoff.h
index 991d92f9..488db08c 100644
--- a/include/coff/ecoff.h
+++ b/include/coff/ecoff.h
@@ -47,6 +47,14 @@
/* A compressed version of an ALPHA_MAGIC file created by DEC's tools. */
#define ALPHA_MAGIC_COMPRESSED 0x188
+#ifdef TARGET_SW_64
+/* Sw_64 magic numbers used in filehdr. */
+#define SW_64_MAGIC 0x184
+#define SW_64_MAGIC_BSD 0x187
+/* A compressed version of an SW_64_MAGIC file created by DEC's tools. */
+#define SW_64_MAGIC_COMPRESSED 0x189
+#endif
+
/* Magic numbers used in a.out header. */
#define ECOFF_AOUT_OMAGIC 0407 /* not demand paged (ld -N). */
#define ECOFF_AOUT_ZMAGIC 0413 /* demand load format, eg normal ld output */
diff --git a/include/coff/pe.h b/include/coff/pe.h
index 6b26d533..79e349a5 100644
--- a/include/coff/pe.h
+++ b/include/coff/pe.h
@@ -134,6 +134,10 @@
#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000
#define IMAGE_FILE_MACHINE_ALPHA 0x0184
#define IMAGE_FILE_MACHINE_ALPHA64 0x0284
+#ifdef TARGET_SW_64
+#define IMAGE_FILE_MACHINE_SW_64 0x0184
+#define IMAGE_FILE_MACHINE_SW_6464 0x0284
+#endif
#define IMAGE_FILE_MACHINE_AM33 0x01d3
#define IMAGE_FILE_MACHINE_AMD64 0x8664
#define IMAGE_FILE_MACHINE_ARM 0x01c0
diff --git a/include/coff/sw_64.h b/include/coff/sw_64.h
new file mode 100644
index 00000000..3476e45b
--- /dev/null
+++ b/include/coff/sw_64.h
@@ -0,0 +1,391 @@
+/* ECOFF support on Sw_64 machines.
+ coff/ecoff.h must be included before this file.
+
+ Copyright (C) 2001-2023 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/********************** FILE HEADER **********************/
+
+struct external_filehdr
+{
+ unsigned char f_magic[2]; /* magic number */
+ unsigned char f_nscns[2]; /* number of sections */
+ unsigned char f_timdat[4]; /* time & date stamp */
+ unsigned char f_symptr[8]; /* file pointer to symtab */
+ unsigned char f_nsyms[4]; /* number of symtab entries */
+ unsigned char f_opthdr[2]; /* sizeof (optional hdr) */
+ unsigned char f_flags[2]; /* flags */
+};
+
+/* Magic numbers are defined in coff/ecoff.h. */
+#define SW_64_ECOFF_BADMAG(x) \
+ ((x).f_magic != SW_64_MAGIC && (x).f_magic != SW_64_MAGIC_BSD)
+
+#define SW_64_ECOFF_COMPRESSEDMAG(x) ((x).f_magic == SW_64_MAGIC_COMPRESSED)
+
+/* The object type is encoded in the f_flags. */
+#define F_SW_64_OBJECT_TYPE_MASK 0x3000
+#define F_SW_64_NO_SHARED 0x1000
+#define F_SW_64_SHARABLE 0x2000
+#define F_SW_64_CALL_SHARED 0x3000
+
+#define FILHDR struct external_filehdr
+#define FILHSZ 24
+
+/********************** AOUT "OPTIONAL HEADER" **********************/
+
+typedef struct external_aouthdr
+{
+ unsigned char magic[2]; /* type of file. */
+ unsigned char vstamp[2]; /* version stamp. */
+ unsigned char bldrev[2]; /* ?? */
+ unsigned char padding[2]; /* pad to quadword boundary. */
+ unsigned char tsize[8]; /* text size in bytes. */
+ unsigned char dsize[8]; /* initialized data " " */
+ unsigned char bsize[8]; /* uninitialized data " "*/
+ unsigned char entry[8]; /* entry pt. */
+ unsigned char text_start[8]; /* base of text used for this file. */
+ unsigned char data_start[8]; /* base of data used for this file. */
+ unsigned char bss_start[8]; /* base of bss used for this file. */
+ unsigned char gprmask[4]; /* bitmask of general registers used. */
+ unsigned char fprmask[4]; /* bitmask of floating point registers used. */
+ unsigned char gp_value[8]; /* value for gp register. */
+} AOUTHDR;
+
+/* compute size of a header */
+
+#define AOUTSZ 80
+#define AOUTHDRSZ 80
+
+/********************** SECTION HEADER **********************/
+
+struct external_scnhdr
+{
+ unsigned char s_name[8]; /* section name */
+ unsigned char s_paddr[8]; /* physical address, aliased s_nlib */
+ unsigned char s_vaddr[8]; /* virtual address */
+ unsigned char s_size[8]; /* section size */
+ unsigned char s_scnptr[8]; /* file ptr to raw data for section */
+ unsigned char s_relptr[8]; /* file ptr to relocation */
+ unsigned char s_lnnoptr[8]; /* file ptr to line numbers */
+ unsigned char s_nreloc[2]; /* number of relocation entries */
+ unsigned char s_nlnno[2]; /* number of line number entries*/
+ unsigned char s_flags[4]; /* flags */
+};
+
+#define SCNHDR struct external_scnhdr
+#define SCNHSZ 64
+
+/********************** RELOCATION DIRECTIVES **********************/
+
+struct external_reloc
+{
+ unsigned char r_vaddr[8];
+ unsigned char r_symndx[4];
+ unsigned char r_bits[4];
+};
+
+#define RELOC struct external_reloc
+#define RELSZ 16
+
+/* Constants to unpack the r_bits field. The Sw_64 seems to always be
+ little endian, so I haven't bothered to define big endian variants
+ of these. */
+
+#define RELOC_BITS0_TYPE_LITTLE 0xff
+#define RELOC_BITS0_TYPE_SH_LITTLE 0
+
+#define RELOC_BITS1_EXTERN_LITTLE 0x01
+
+#define RELOC_BITS1_OFFSET_LITTLE 0x7e
+#define RELOC_BITS1_OFFSET_SH_LITTLE 1
+
+#define RELOC_BITS1_RESERVED_LITTLE 0x80
+#define RELOC_BITS1_RESERVED_SH_LITTLE 7
+#define RELOC_BITS2_RESERVED_LITTLE 0xff
+#define RELOC_BITS2_RESERVED_SH_LEFT_LITTLE 1
+#define RELOC_BITS3_RESERVED_LITTLE 0x03
+#define RELOC_BITS3_RESERVED_SH_LEFT_LITTLE 9
+
+#define RELOC_BITS3_SIZE_LITTLE 0xfc
+#define RELOC_BITS3_SIZE_SH_LITTLE 2
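Below is a sketch of how these little-endian masks unpack an r_bits field, assuming the same field semantics as the Alpha ECOFF back end (relocation type in byte 0, extern flag and offset in byte 1, size in byte 3); the sample bytes are made up and this is not code from the patch:

```c
#include <stdio.h>

int
main (void)
{
  /* r_bits of a hypothetical little-endian external reloc.  */
  unsigned char r_bits[4] = { 0x04, 0x03, 0x00, 0x08 };

  unsigned type     = (r_bits[0] & 0xff) >> 0;  /* RELOC_BITS0_TYPE_LITTLE    */
  unsigned r_extern = (r_bits[1] & 0x01) != 0;  /* RELOC_BITS1_EXTERN_LITTLE  */
  unsigned offset   = (r_bits[1] & 0x7e) >> 1;  /* RELOC_BITS1_OFFSET_LITTLE  */
  unsigned size     = (r_bits[3] & 0xfc) >> 2;  /* RELOC_BITS3_SIZE_LITTLE    */

  /* Type 4 corresponds to SW_64_R_LITERAL in the r_type list that follows.
     Prints: type=4 extern=1 offset=1 size=2 */
  printf ("type=%u extern=%u offset=%u size=%u\n", type, r_extern, offset, size);
  return 0;
}
```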
+
+/* The r_type field in a reloc is one of the following values. */
+#define SW_64_R_IGNORE 0
+#define SW_64_R_REFLONG 1
+#define SW_64_R_REFQUAD 2
+#define SW_64_R_GPREL32 3
+#define SW_64_R_LITERAL 4
+#define SW_64_R_LITUSE 5
+#define SW_64_R_GPDISP 6
+#define SW_64_R_BRADDR 7
+#define SW_64_R_HINT 8
+#define SW_64_R_SREL16 9
+#define SW_64_R_SREL32 10
+#define SW_64_R_SREL64 11
+#define SW_64_R_OP_PUSH 12
+#define SW_64_R_OP_STORE 13
+#define SW_64_R_OP_PSUB 14
+#define SW_64_R_OP_PRSHIFT 15
+#define SW_64_R_GPVALUE 16
+#define SW_64_R_GPRELHIGH 17
+#define SW_64_R_GPRELLOW 18
+#define SW_64_R_IMMED 19
+#define SW_64_R_BR26ADDR 20
+
+/* Overloaded reloc value used by Net- and OpenBSD. */
+#define SW_64_R_LITERALSLEAZY 17
+
+/* With SW_64_R_LITUSE, the r_size field is one of the following values. */
+#define SW_64_R_LU_BASE 1
+#define SW_64_R_LU_BYTOFF 2
+#define SW_64_R_LU_JSR 3
+
+/* With SW_64_R_IMMED, the r_size field is one of the following values. */
+#define SW_64_R_IMMED_GP_16 1
+#define SW_64_R_IMMED_GP_HI32 2
+#define SW_64_R_IMMED_SCN_HI32 3
+#define SW_64_R_IMMED_BR_HI32 4
+#define SW_64_R_IMMED_LO32 5
+
+/********************** SYMBOLIC INFORMATION **********************/
+
+/* ECOFF uses COFF-like section structures, but its own symbol format.
+ This file defines the symbol format in fields whose size and alignment
+ will not vary on different host systems. */
+
+/* File header as a set of bytes. */
+
+struct hdr_ext
+{
+ unsigned char h_magic[2];
+ unsigned char h_vstamp[2];
+ unsigned char h_ilineMax[4];
+ unsigned char h_idnMax[4];
+ unsigned char h_ipdMax[4];
+ unsigned char h_isymMax[4];
+ unsigned char h_ioptMax[4];
+ unsigned char h_iauxMax[4];
+ unsigned char h_issMax[4];
+ unsigned char h_issExtMax[4];
+ unsigned char h_ifdMax[4];
+ unsigned char h_crfd[4];
+ unsigned char h_iextMax[4];
+ unsigned char h_cbLine[8];
+ unsigned char h_cbLineOffset[8];
+ unsigned char h_cbDnOffset[8];
+ unsigned char h_cbPdOffset[8];
+ unsigned char h_cbSymOffset[8];
+ unsigned char h_cbOptOffset[8];
+ unsigned char h_cbAuxOffset[8];
+ unsigned char h_cbSsOffset[8];
+ unsigned char h_cbSsExtOffset[8];
+ unsigned char h_cbFdOffset[8];
+ unsigned char h_cbRfdOffset[8];
+ unsigned char h_cbExtOffset[8];
+};
+
+/* File descriptor external record. */
+
+struct fdr_ext
+{
+ unsigned char f_adr[8];
+ unsigned char f_cbLineOffset[8];
+ unsigned char f_cbLine[8];
+ unsigned char f_cbSs[8];
+ unsigned char f_rss[4];
+ unsigned char f_issBase[4];
+ unsigned char f_isymBase[4];
+ unsigned char f_csym[4];
+ unsigned char f_ilineBase[4];
+ unsigned char f_cline[4];
+ unsigned char f_ioptBase[4];
+ unsigned char f_copt[4];
+ unsigned char f_ipdFirst[4];
+ unsigned char f_cpd[4];
+ unsigned char f_iauxBase[4];
+ unsigned char f_caux[4];
+ unsigned char f_rfdBase[4];
+ unsigned char f_crfd[4];
+ unsigned char f_bits1[1];
+ unsigned char f_bits2[3];
+ unsigned char f_padding[4];
+};
+
+#define FDR_BITS1_LANG_BIG 0xF8
+#define FDR_BITS1_LANG_SH_BIG 3
+#define FDR_BITS1_LANG_LITTLE 0x1F
+#define FDR_BITS1_LANG_SH_LITTLE 0
+
+#define FDR_BITS1_FMERGE_BIG 0x04
+#define FDR_BITS1_FMERGE_LITTLE 0x20
+
+#define FDR_BITS1_FREADIN_BIG 0x02
+#define FDR_BITS1_FREADIN_LITTLE 0x40
+
+#define FDR_BITS1_FBIGENDIAN_BIG 0x01
+#define FDR_BITS1_FBIGENDIAN_LITTLE 0x80
+
+#define FDR_BITS2_GLEVEL_BIG 0xC0
+#define FDR_BITS2_GLEVEL_SH_BIG 6
+#define FDR_BITS2_GLEVEL_LITTLE 0x03
+#define FDR_BITS2_GLEVEL_SH_LITTLE 0
+
+/* We ignore the `reserved' field in bits2. */
+
+/* Procedure descriptor external record. */
+
+struct pdr_ext
+{
+ unsigned char p_adr[8];
+ unsigned char p_cbLineOffset[8];
+ unsigned char p_isym[4];
+ unsigned char p_iline[4];
+ unsigned char p_regmask[4];
+ unsigned char p_regoffset[4];
+ unsigned char p_iopt[4];
+ unsigned char p_fregmask[4];
+ unsigned char p_fregoffset[4];
+ unsigned char p_frameoffset[4];
+ unsigned char p_lnLow[4];
+ unsigned char p_lnHigh[4];
+ unsigned char p_gp_prologue[1];
+ unsigned char p_bits1[1];
+ unsigned char p_bits2[1];
+ unsigned char p_localoff[1];
+ unsigned char p_framereg[2];
+ unsigned char p_pcreg[2];
+};
+
+#define PDR_BITS1_GP_USED_BIG 0x80
+#define PDR_BITS1_REG_FRAME_BIG 0x40
+#define PDR_BITS1_PROF_BIG 0x20
+#define PDR_BITS1_RESERVED_BIG 0x1f
+#define PDR_BITS1_RESERVED_SH_LEFT_BIG 8
+#define PDR_BITS2_RESERVED_BIG 0xff
+#define PDR_BITS2_RESERVED_SH_BIG 0
+
+#define PDR_BITS1_GP_USED_LITTLE 0x01
+#define PDR_BITS1_REG_FRAME_LITTLE 0x02
+#define PDR_BITS1_PROF_LITTLE 0x04
+#define PDR_BITS1_RESERVED_LITTLE 0xf8
+#define PDR_BITS1_RESERVED_SH_LITTLE 3
+#define PDR_BITS2_RESERVED_LITTLE 0xff
+#define PDR_BITS2_RESERVED_SH_LEFT_LITTLE 5
+
+/* Line numbers. */
+
+struct line_ext
+{
+ unsigned char l_line[4];
+};
+
+/* Symbol external record. */
+
+struct sym_ext
+{
+ unsigned char s_value[8];
+ unsigned char s_iss[4];
+ unsigned char s_bits1[1];
+ unsigned char s_bits2[1];
+ unsigned char s_bits3[1];
+ unsigned char s_bits4[1];
+};
+
+#define SYM_BITS1_ST_BIG 0xFC
+#define SYM_BITS1_ST_SH_BIG 2
+#define SYM_BITS1_ST_LITTLE 0x3F
+#define SYM_BITS1_ST_SH_LITTLE 0
+
+#define SYM_BITS1_SC_BIG 0x03
+#define SYM_BITS1_SC_SH_LEFT_BIG 3
+#define SYM_BITS1_SC_LITTLE 0xC0
+#define SYM_BITS1_SC_SH_LITTLE 6
+
+#define SYM_BITS2_SC_BIG 0xE0
+#define SYM_BITS2_SC_SH_BIG 5
+#define SYM_BITS2_SC_LITTLE 0x07
+#define SYM_BITS2_SC_SH_LEFT_LITTLE 2
+
+#define SYM_BITS2_RESERVED_BIG 0x10
+#define SYM_BITS2_RESERVED_LITTLE 0x08
+
+#define SYM_BITS2_INDEX_BIG 0x0F
+#define SYM_BITS2_INDEX_SH_LEFT_BIG 16
+#define SYM_BITS2_INDEX_LITTLE 0xF0
+#define SYM_BITS2_INDEX_SH_LITTLE 4
+
+#define SYM_BITS3_INDEX_SH_LEFT_BIG 8
+#define SYM_BITS3_INDEX_SH_LEFT_LITTLE 4
+
+#define SYM_BITS4_INDEX_SH_LEFT_BIG 0
+#define SYM_BITS4_INDEX_SH_LEFT_LITTLE 12
+
+/* External symbol external record. */
+
+struct ext_ext
+{
+ struct sym_ext es_asym;
+ unsigned char es_bits1[1];
+ unsigned char es_bits2[3];
+ unsigned char es_ifd[4];
+};
+
+#define EXT_BITS1_JMPTBL_BIG 0x80
+#define EXT_BITS1_JMPTBL_LITTLE 0x01
+
+#define EXT_BITS1_COBOL_MAIN_BIG 0x40
+#define EXT_BITS1_COBOL_MAIN_LITTLE 0x02
+
+#define EXT_BITS1_WEAKEXT_BIG 0x20
+#define EXT_BITS1_WEAKEXT_LITTLE 0x04
+
+/* Dense numbers external record. */
+
+struct dnr_ext
+{
+ unsigned char d_rfd[4];
+ unsigned char d_index[4];
+};
+
+/* Relative file descriptor. */
+
+struct rfd_ext
+{
+ unsigned char rfd[4];
+};
+
+/* Optimizer symbol external record. */
+
+struct opt_ext
+{
+ unsigned char o_bits1[1];
+ unsigned char o_bits2[1];
+ unsigned char o_bits3[1];
+ unsigned char o_bits4[1];
+ struct rndx_ext o_rndx;
+ unsigned char o_offset[4];
+};
+
+#define OPT_BITS2_VALUE_SH_LEFT_BIG 16
+#define OPT_BITS2_VALUE_SH_LEFT_LITTLE 0
+
+#define OPT_BITS3_VALUE_SH_LEFT_BIG 8
+#define OPT_BITS3_VALUE_SH_LEFT_LITTLE 8
+
+#define OPT_BITS4_VALUE_SH_LEFT_BIG 0
+#define OPT_BITS4_VALUE_SH_LEFT_LITTLE 16
diff --git a/include/elf/common.h b/include/elf/common.h
index ffa6b60b..417bcb0a 100644
--- a/include/elf/common.h
+++ b/include/elf/common.h
@@ -415,6 +415,9 @@
/* Alpha backend magic number. Written in the absence of an ABI. */
#define EM_ALPHA 0x9026
+/* Sw_64 backend magic number. Written in the absence of an ABI. */
+#define EM_SW_64 0x9916
+
/* Cygnus M32R ELF backend. Written in the absence of an ABI. */
#define EM_CYGNUS_M32R 0x9041
diff --git a/include/elf/sw_64.h b/include/elf/sw_64.h
new file mode 100644
index 00000000..26fff944
--- /dev/null
+++ b/include/elf/sw_64.h
@@ -0,0 +1,138 @@
+/* SW_64 ELF support for BFD.
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+
+ By Eric Youngdale, <eric@aib.com>. No processor supplement available
+ for this platform.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* This file holds definitions specific to the SW_64 ELF ABI. Note
+ that most of this is not actually implemented by BFD. */
+
+#ifndef _ELF_SW_64_H
+#define _ELF_SW_64_H
+
+/* Processor specific flags for the ELF header e_flags field. */
+
+/* All addresses must be below 2GB. */
+#define EF_SW_64_32BIT 0x00000001
+
+/* All relocations needed for relaxation with code movement are present. */
+#define EF_SW_64_CANRELAX 0x00000002
+
+/* Processor specific section flags. */
+
+/* This section must be in the global data area. */
+#define SHF_SW_64_GPREL 0x10000000
+
+/* Section contains some sort of debugging information. The exact
+ format is unspecified. It's probably ECOFF symbols. */
+#define SHT_SW_64_DEBUG 0x70000001
+
+/* Section contains register usage information. */
+#define SHT_SW_64_REGINFO 0x70000002
+
+/* Special values for the st_other field in the symbol table. */
+
+#define STO_SW_64_NOPV 0x80
+#define STO_SW_64_STD_GPLOAD 0x88
+
+/* Special values for Elf64_Dyn tag. */
+#define DT_SW_64_PLTRO DT_LOPROC
+
+#include "elf/reloc-macros.h"
+
+unsigned long
+bfd_elf_sw_64_mach (flagword flags);
+
+enum
+{
+ E_SW_64_MACH_SW6A = 4,
+ E_SW_64_MACH_SW6B = 8,
+ E_SW_64_MACH_SW8A = 12,
+};
+
+#define EF_SW_64_MACH ~(0x3)
+
+#define E_SW_64_MACH_SW6A 4
+#define E_SW_64_MACH_SW6B 8
+#define E_SW_64_MACH_SW8A 12
+
+/* Sw_64 relocs. */
+START_RELOC_NUMBERS (elf_sw_64_reloc_type)
+RELOC_NUMBER (R_SW_64_NONE, 0) /* No reloc */
+RELOC_NUMBER (R_SW_64_REFLONG, 1) /* Direct 32 bit */
+RELOC_NUMBER (R_SW_64_REFQUAD, 2) /* Direct 64 bit */
+RELOC_NUMBER (R_SW_64_GPREL32, 3) /* GP relative 32 bit */
+RELOC_NUMBER (R_SW_64_LITERAL, 4) /* GP relative 16 bit w/optimization */
+RELOC_NUMBER (R_SW_64_LITUSE, 5) /* Optimization hint for LITERAL */
+RELOC_NUMBER (R_SW_64_GPDISP, 6) /* Add displacement to GP */
+RELOC_NUMBER (R_SW_64_BRADDR, 7) /* PC+4 relative 23 bit shifted */
+RELOC_NUMBER (R_SW_64_HINT, 8) /* PC+4 relative 16 bit shifted */
+RELOC_NUMBER (R_SW_64_SREL16, 9) /* PC relative 16 bit */
+RELOC_NUMBER (R_SW_64_SREL32, 10) /* PC relative 32 bit */
+RELOC_NUMBER (R_SW_64_SREL64, 11) /* PC relative 64 bit */
+
+/* Skip 12 - 16; deprecated ECOFF relocs. */
+
+RELOC_NUMBER (R_SW_64_GPRELHIGH, 17) /* GP relative 32 bit, high 16 bits */
+RELOC_NUMBER (R_SW_64_GPRELLOW, 18) /* GP relative 32 bit, low 16 bits */
+RELOC_NUMBER (R_SW_64_GPREL16, 19) /* GP relative 16 bit */
+
+/* Skip 20 - 23; deprecated ECOFF relocs. */
+
+/* These relocations are specific to shared libraries. */
+RELOC_NUMBER (R_SW_64_COPY, 24) /* Copy symbol at runtime */
+RELOC_NUMBER (R_SW_64_GLOB_DAT, 25) /* Create GOT entry */
+RELOC_NUMBER (R_SW_64_JMP_SLOT, 26) /* Create PLT entry */
+RELOC_NUMBER (R_SW_64_RELATIVE, 27) /* Adjust by program base */
+
+/* Like BRADDR, but assert that the source and target object file
+ share the same GP value, and adjust the target address for
+ STO_SW_64_STD_GPLOAD. */
+RELOC_NUMBER (R_SW_64_BRSGP, 28)
+
+/* Thread-Local Storage. */
+RELOC_NUMBER (R_SW_64_TLSGD, 29)
+RELOC_NUMBER (R_SW_64_TLSLDM, 30)
+RELOC_NUMBER (R_SW_64_DTPMOD64, 31)
+RELOC_NUMBER (R_SW_64_GOTDTPREL, 32)
+RELOC_NUMBER (R_SW_64_DTPREL64, 33)
+RELOC_NUMBER (R_SW_64_DTPRELHI, 34)
+RELOC_NUMBER (R_SW_64_DTPRELLO, 35)
+RELOC_NUMBER (R_SW_64_DTPREL16, 36)
+RELOC_NUMBER (R_SW_64_GOTTPREL, 37)
+RELOC_NUMBER (R_SW_64_TPREL64, 38)
+RELOC_NUMBER (R_SW_64_TPRELHI, 39)
+RELOC_NUMBER (R_SW_64_TPRELLO, 40)
+RELOC_NUMBER (R_SW_64_TPREL16, 41)
+RELOC_NUMBER (R_SW_64_BR26ADDR, 42)
+RELOC_NUMBER (R_SW_64_LITERAL_GOT, 43) /* GP relative 16 bit */
+RELOC_NUMBER (R_SW_64_TLSREL_GOT, 44) /* GP relative 16 bit */
+
+END_RELOC_NUMBERS (R_SW_64_max)
+
+#define LITUSE_SW_64_ADDR 0
+#define LITUSE_SW_64_BASE 1
+#define LITUSE_SW_64_BYTOFF 2
+#define LITUSE_SW_64_JSR 3
+#define LITUSE_SW_64_TLSGD 4
+#define LITUSE_SW_64_TLSLDM 5
+#define LITUSE_SW_64_JSRDIRECT 6
+
+#endif /* _ELF_SW_64_H */
diff --git a/include/longlong.h b/include/longlong.h
index 9948a587..eaaac3f3 100644
--- a/include/longlong.h
+++ b/include/longlong.h
@@ -191,6 +191,67 @@ extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
#endif /* __alpha_cix__ */
#endif /* __alpha */
+/* __sw_64_cix__ */
+#if defined(__sw_64) && W_TYPE_SIZE == 64
+/* There is a bug in g++ before version 5 that
+ errors on __builtin_sw_64_umulh. */
+#if !defined(__cplusplus) || __GNUC__ >= 5
+#define umul_ppmm(ph, pl, m0, m1) \
+ do \
+ { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ (ph) = __builtin_sw_64_umulh (__m0, __m1); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#define UMUL_TIME 46
+#endif /* !c++ */
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do \
+ { \
+ UDItype __r; \
+ (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+ } while (0)
+extern UDItype
+__udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+#ifdef __sw_64_cix__
+#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clzl (X))
+#define count_trailing_zeros(COUNT, X) ((COUNT) = __builtin_ctzl (X))
+#define COUNT_LEADING_ZEROS_0 64
+#else
+#define count_leading_zeros(COUNT, X) \
+ do \
+ { \
+ UDItype __xr = (X), __t, __a; \
+ __t = __builtin_sw_64_cmpbge (0, __xr); \
+ __a = __clz_tab[__t ^ 0xff] - 1; \
+ __t = __builtin_sw_64_extbl (__xr, __a); \
+ (COUNT) = 64 - (__clz_tab[__t] + __a * 8); \
+ } while (0)
+#define count_trailing_zeros(COUNT, X) \
+ do \
+ { \
+ UDItype __xr = (X), __t, __a; \
+ __t = __builtin_sw_64_cmpbge (0, __xr); \
+ __t = ~__t & -~__t; \
+ __a = ((__t & 0xCC) != 0) * 2; \
+ __a += ((__t & 0xF0) != 0) * 4; \
+ __a += ((__t & 0xAA) != 0); \
+ __t = __builtin_sw_64_extbl (__xr, __a); \
+ __a <<= 3; \
+ __t &= -__t; \
+ __a += ((__t & 0xCC) != 0) * 2; \
+ __a += ((__t & 0xF0) != 0) * 4; \
+ __a += ((__t & 0xAA) != 0); \
+ (COUNT) = __a; \
+ } while (0)
+#endif /* __sw_64_cix__ */
+#endif /* __sw_64 */
+/* sw_64 */
+
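Two bit tricks carry the count_trailing_zeros fallback above: `~t & -~t` isolates the lowest set bit of `~t` (that is, the lowest zero bit of `t`), and the 0xAA/0xCC/0xF0 masks then give the index of that single set bit within a byte. A small portable demonstration of the index computation, independent of the sw_64 builtins (illustrative only):

```c
#include <assert.h>

/* Index of the single set bit in a byte, using the same mask trick as
   the count_trailing_zeros fallback above: 0xAA covers the odd bit
   positions (+1), 0xCC positions 2,3,6,7 (+2), 0xF0 positions 4-7 (+4).  */
static unsigned
bit_index (unsigned t)
{
  unsigned a = ((t & 0xCC) != 0) * 2;
  a += ((t & 0xF0) != 0) * 4;
  a += ((t & 0xAA) != 0);
  return a;
}

int
main (void)
{
  unsigned i;
  for (i = 0; i < 8; i++)
    assert (bit_index (1u << i) == i);

  /* Isolating the lowest set bit of x is x & -x; the macro applies
     this to ~t rather than t.  */
  assert ((0x68u & -0x68u) == 0x08u);
  return 0;
}
```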
#if defined (__arc__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add.f %1, %4, %5\n\tadc %0, %2, %3" \
diff --git a/include/opcode/sw_64.h b/include/opcode/sw_64.h
new file mode 100644
index 00000000..14668753
--- /dev/null
+++ b/include/opcode/sw_64.h
@@ -0,0 +1,246 @@
+/* sw_64.h -- Header file for Sw_64 opcode table
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@tamu.edu>,
+ patterned after the PPC opcode table written by Ian Lance Taylor.
+
+ This file is part of GDB, GAS, and the GNU binutils.
+
+ GDB, GAS, and the GNU binutils are free software; you can redistribute
+ them and/or modify them under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either version 3,
+ or (at your option) any later version.
+
+ GDB, GAS, and the GNU binutils are distributed in the hope that they
+ will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this file; see the file COPYING3. If not, write to the Free
+ Software Foundation, 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#ifndef OPCODE_SW_64_H
+#define OPCODE_SW_64_H
+
+/* The opcode table is an array of struct sw_64_opcode. */
+
+struct sw_64_opcode
+{
+ /* The opcode name. */
+ const char *name;
+
+ /* The opcode itself. Those bits which will be filled in with
+ operands are zeroes. */
+ unsigned opcode;
+
+ /* The opcode mask. This is used by the disassembler. This is a
+ mask containing ones indicating those bits which must match the
+ opcode field, and zeroes indicating those bits which need not
+ match (and are presumably filled in by operands). */
+ unsigned mask;
+
+ /* One bit flags for the opcode. These are primarily used to
+ indicate specific processors and environments support the
+ instructions. The defined values are listed below. */
+ unsigned flags;
+
+ /* An array of operand codes. Each code is an index into the
+ operand table. They appear in the order which the operands must
+ appear in assembly code, and are terminated by a zero. */
+
+ unsigned char operands[5];
+};
+
+/* The table itself is sorted by major opcode number, and is otherwise
+ in the order in which the disassembler should consider
+ instructions. */
+extern const struct sw_64_opcode sw_64_opcodes[];
+extern const unsigned sw_64_num_opcodes;
+
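As the opcode/mask comments above describe, a disassembler candidate matches a table entry when `(insn & mask) == opcode`. A sketch of that test against a hypothetical entry (the real sw_64_opcodes[] contents live in opcodes/, not in this header; all names and values here are made up for illustration):

```c
#include <stdio.h>

/* Hypothetical entry shaped like struct sw_64_opcode above: the fixed
   bits of the instruction are in .opcode, and .mask has a one for every
   bit that must match.  */
struct entry { const char *name; unsigned opcode; unsigned mask; };

static const struct entry table[] = {
  /* A made-up pattern that fixes only the major opcode, bits 26-31.  */
  { "fake_bsr", 0x34u << 26, 0x3fu << 26 },
};

int
main (void)
{
  unsigned insn = (0x34u << 26) | 0x1ffffc;  /* operand bits may vary */
  size_t i;

  for (i = 0; i < sizeof table / sizeof table[0]; i++)
    if ((insn & table[i].mask) == table[i].opcode)
      printf ("matched %s\n", table[i].name);
  return 0;
}
```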
+/* Values defined for the flags field of a struct sw_64_opcode. */
+
+/* CPU Availability */
+#define AXP_OPCODE_SW6 0x0800 /* SW6 insns. */
+#define AXP_OPCODE_SW6A 0x1000 /* SW6A insns. */
+#define AXP_OPCODE_SW6B 0x2000 /* SW6B insns. */
+#define AXP_OPCODE_SW8A 0x4000 /* SW8A insns. */
+#define AXP_OPCODE_SW4E 0x8000 /* SW4E insns. */
+
+#define AXP_OPCODE_NOPAL \
+ ((AXP_OPCODE_SW6 | AXP_OPCODE_SW6A | AXP_OPCODE_SW6B | AXP_OPCODE_SW8A \
+ | AXP_OPCODE_SW4E))
+
+/* A macro to extract the major opcode from an instruction. */
+#define AXP_OP(i) (((i) >> 26) & 0x3F)
+
+#define AXP_LITOP(i) (((i) >> 26) & 0x3D)
+
+/* The total number of major opcodes. */
+#define AXP_NOPS 0x40
+
+/* The operands table is an array of struct sw_64_operand. */
+
+struct sw_64_operand
+{
+ /* The number of bits in the operand. */
+ unsigned int bits : 5;
+
+ /* How far the operand is left shifted in the instruction. */
+ unsigned int shift : 5;
+
+ /* The default relocation type for this operand. */
+ signed int default_reloc : 16;
+
+ /* One bit syntax flags. */
+ unsigned int flags : 16;
+
+ /* Insertion function. This is used by the assembler. To insert an
+ operand value into an instruction, check this field.
+
+ If it is NULL, execute
+ i |= (op & ((1 << o->bits) - 1)) << o->shift;
+ (i is the instruction which we are filling in, o is a pointer to
+ this structure, and op is the opcode value; this assumes twos
+ complement arithmetic).
+
+ If this field is not NULL, then simply call it with the
+ instruction and the operand value. It will return the new value
+ of the instruction. If the ERRMSG argument is not NULL, then if
+ the operand value is illegal, *ERRMSG will be set to a warning
+ string (the operand will be inserted in any case). If the
+ operand value is legal, *ERRMSG will be unchanged (most operands
+ can accept any value). */
+ unsigned (*insert) (unsigned instruction, int op, const char **errmsg);
+
+ /* Extraction function. This is used by the disassembler. To
+ extract this operand type from an instruction, check this field.
+
+ If it is NULL, compute
+ op = ((i) >> o->shift) & ((1 << o->bits) - 1);
+ if ((o->flags & AXP_OPERAND_SIGNED) != 0
+ && (op & (1 << (o->bits - 1))) != 0)
+ op -= 1 << o->bits;
+ (i is the instruction, o is a pointer to this structure, and op
+ is the result; this assumes twos complement arithmetic).
+
+ If this field is not NULL, then simply call it with the
+ instruction value. It will return the value of the operand. If
+ the INVALID argument is not NULL, *INVALID will be set to
+ non-zero if this operand type can not actually be extracted from
+ this operand (i.e., the instruction does not match). If the
+ operand is valid, *INVALID will not be changed. */
+ int (*extract) (unsigned instruction, int *invalid);
+};
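The insert/extract comments above spell out the default encode and decode formulas. The stand-alone sketch below round-trips a signed value through both defaults, using a made-up 21-bit operand descriptor rather than the real sw_64_operands[] table (illustrative only, not part of the patch):

```c
#include <stdio.h>

/* A cut-down stand-in for the operand descriptor above.  */
struct operand_desc { unsigned bits, shift, flags; };
#define FLAG_SIGNED 0x1

/* The default insertion described above: mask the value to the field
   width and shift it into place.  */
static unsigned
default_insert (unsigned insn, int value, const struct operand_desc *o)
{
  return insn | ((value & ((1u << o->bits) - 1)) << o->shift);
}

/* The default extraction described above, with sign extension when the
   operand is flagged as signed.  */
static int
default_extract (unsigned insn, const struct operand_desc *o)
{
  int op = (insn >> o->shift) & ((1 << o->bits) - 1);
  if ((o->flags & FLAG_SIGNED) != 0 && (op & (1 << (o->bits - 1))) != 0)
    op -= 1 << o->bits;
  return op;
}

int
main (void)
{
  struct operand_desc disp21 = { 21, 0, FLAG_SIGNED };
  unsigned insn = default_insert (0x34u << 26, -4, &disp21);
  printf ("disp = %d\n", default_extract (insn, &disp21));  /* prints -4 */
  return 0;
}
```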
+
+#define ISA_UNKNOWN 0 /* Gas internal use. */
+
+#define CPU_UNKNOWN 0 /* Gas internal use. */
+
+/* Elements in the table are retrieved by indexing with values from
+ the operands field of the sw_64_opcodes table. */
+
+extern const struct sw_64_operand sw_64_operands[];
+extern const unsigned sw_64_num_operands;
+
+/* Values defined for the flags field of a struct sw_64_operand. */
+
+/* Mask for selecting the type for typecheck purposes. */
+#define AXP_OPERAND_TYPECHECK_MASK \
+ (AXP_OPERAND_PARENS | AXP_OPERAND_COMMA | AXP_OPERAND_IR | AXP_OPERAND_FPR \
+ | AXP_OPERAND_RELATIVE | AXP_OPERAND_SIGNED | AXP_OPERAND_UNSIGNED)
+
+/* This operand does not actually exist in the assembler input. This
+ is used to support extended mnemonics, for which two operands fields
+ are identical. The assembler should call the insert function with
+ any op value. The disassembler should call the extract function,
+ ignore the return value, and check the value placed in the invalid
+ argument. */
+#define AXP_OPERAND_FAKE 01
+
+/* The operand should be wrapped in parentheses rather than separated
+ from the previous by a comma. This is used for the load and store
+ instructions which want their operands to look like "Ra,disp (Rb)". */
+#define AXP_OPERAND_PARENS 02
+
+/* Used in combination with PARENS, this suppresses the suppression of
+ the comma. This is used for "jmp Ra,(Rb),hint". */
+#define AXP_OPERAND_COMMA 04
+
+/* This operand names an integer register. */
+#define AXP_OPERAND_IR 010
+
+/* This operand names a floating point register. */
+#define AXP_OPERAND_FPR 020
+
+/* This operand is a relative branch displacement. The disassembler
+ prints these symbolically if possible. */
+#define AXP_OPERAND_RELATIVE 040
+
+/* This operand takes signed values. */
+#define AXP_OPERAND_SIGNED 0100
+
+/* This operand takes unsigned values. This exists primarily so that
+ a flags value of 0 can be treated as end-of-arguments. */
+#define AXP_OPERAND_UNSIGNED 0200
+
+/* Suppress overflow detection on this field. This is used for hints. */
+#define AXP_OPERAND_NOOVERFLOW 0400
+
+/* Mask for optional argument default value. */
+#define AXP_OPERAND_OPTIONAL_MASK 07000
+
+/* This operand defaults to zero. This is used for jump hints. */
+#define AXP_OPERAND_DEFAULT_ZERO 01000
+
+/* This operand should default to the first (real) operand and is used
+ in conjunction with AXP_OPERAND_OPTIONAL. This allows
+ "and $0,3,$0" to be written as "and $0,3", etc. I don't like
+ it, but it's what DEC does. */
+#define AXP_OPERAND_DEFAULT_FIRST 02000
+
+/* Similarly, this operand should default to the second (real) operand.
+ This allows "negl $0" instead of "negl $0,$0". */
+#define AXP_OPERAND_DEFAULT_SECOND 04000
+
+/* Similarly, this operand should default to the third (real) operand.
+ * This allows "selne $0,$1,$2,$2" to be written as "selne $0,$1,$2" */
+#define AXP_OPERAND_DEFAULT_THIRD 0xa00
+
+/* Register common names. */
+
+#define AXP_REG_V0 0
+#define AXP_REG_T0 1
+#define AXP_REG_T1 2
+#define AXP_REG_T2 3
+#define AXP_REG_T3 4
+#define AXP_REG_T4 5
+#define AXP_REG_T5 6
+#define AXP_REG_T6 7
+#define AXP_REG_T7 8
+#define AXP_REG_S0 9
+#define AXP_REG_S1 10
+#define AXP_REG_S2 11
+#define AXP_REG_S3 12
+#define AXP_REG_S4 13
+#define AXP_REG_S5 14
+#define AXP_REG_FP 15
+#define AXP_REG_A0 16
+#define AXP_REG_A1 17
+#define AXP_REG_A2 18
+#define AXP_REG_A3 19
+#define AXP_REG_A4 20
+#define AXP_REG_A5 21
+#define AXP_REG_T8 22
+#define AXP_REG_T9 23
+#define AXP_REG_T10 24
+#define AXP_REG_T11 25
+#define AXP_REG_RA 26
+#define AXP_REG_PV 27
+#define AXP_REG_T12 27
+#define AXP_REG_AT 28
+#define AXP_REG_GP 29
+#define AXP_REG_SP 30
+#define AXP_REG_ZERO 31
+
+#endif /* OPCODE_SW_64_H */
diff --git a/ld/Makefile.am b/ld/Makefile.am
index c3adbb0c..fbeca376 100644
--- a/ld/Makefile.am
+++ b/ld/Makefile.am
@@ -158,6 +158,7 @@ ALL_EMULATION_SOURCES = \
eaixrs6.c \
ealpha.c \
ealphavms.c \
+ esw_64.c \
earcelf.c \
earclinux.c \
earclinux_nps.c \
@@ -432,6 +433,9 @@ ALL_64_EMULATION_SOURCES = \
eelf64alpha.c \
eelf64alpha_fbsd.c \
eelf64alpha_nbsd.c \
+ eelf64sw_64.c \
+ eelf64sw_64_fbsd.c \
+ eelf64sw_64_nbsd.c \
eelf64bmip.c \
eelf64bpf.c \
eelf64briscv.c \
@@ -656,6 +660,7 @@ $(ALL_EMULATION_SOURCES) $(ALL_64_EMULATION_SOURCES): $(GEN_DEPENDS)
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eaixrs6.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ealpha.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ealphavms.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esw_64.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earcelf.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earclinux.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earclinux_nps.Pc@am__quote@
@@ -927,6 +932,9 @@ $(ALL_EMULATION_SOURCES) $(ALL_64_EMULATION_SOURCES): $(GEN_DEPENDS)
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha_fbsd.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha_nbsd.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64_fbsd.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64_nbsd.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64bmip.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64bpf.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64briscv.Pc@am__quote@
diff --git a/ld/Makefile.in b/ld/Makefile.in
index d1a56026..32eced36 100644
--- a/ld/Makefile.in
+++ b/ld/Makefile.in
@@ -659,6 +659,7 @@ ALL_EMULATION_SOURCES = \
eaixrs6.c \
ealpha.c \
ealphavms.c \
+ esw_64.c \
earcelf.c \
earclinux.c \
earclinux_nps.c \
@@ -932,6 +933,9 @@ ALL_64_EMULATION_SOURCES = \
eelf64alpha.c \
eelf64alpha_fbsd.c \
eelf64alpha_nbsd.c \
+ eelf64sw_64.c \
+ eelf64sw_64_fbsd.c \
+ eelf64sw_64_nbsd.c \
eelf64bmip.c \
eelf64bpf.c \
eelf64briscv.c \
@@ -1277,6 +1281,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eaixrs6.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ealpha.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ealphavms.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esw_64.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earcelf.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earclinux.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earclinux_nps.Po@am__quote@
@@ -1435,6 +1440,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha_fbsd.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha_nbsd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64_fbsd.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64_nbsd.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64bmip.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64bpf.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64briscv.Po@am__quote@
@@ -2373,6 +2381,7 @@ $(ALL_EMULATION_SOURCES) $(ALL_64_EMULATION_SOURCES): $(GEN_DEPENDS)
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eaixrs6.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ealpha.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ealphavms.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esw_64.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earcelf.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earclinux.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/earclinux_nps.Pc@am__quote@
@@ -2644,6 +2653,9 @@ $(ALL_EMULATION_SOURCES) $(ALL_64_EMULATION_SOURCES): $(GEN_DEPENDS)
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha_fbsd.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64alpha_nbsd.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64_fbsd.Pc@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64sw_64_nbsd.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64bmip.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64bpf.Pc@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64briscv.Pc@am__quote@
diff --git a/ld/config.in b/ld/config.in
index ad0dc6a1..769863c6 100644
--- a/ld/config.in
+++ b/ld/config.in
@@ -255,3 +255,5 @@
/* Define to 1 if you need to in order for `stat' and other things to work. */
#undef _POSIX_SOURCE
+
+#undef TARGET_SW_64
diff --git a/ld/configure b/ld/configure
index 7037dfdd..68c49c17 100755
--- a/ld/configure
+++ b/ld/configure
@@ -15663,6 +15663,13 @@ else
esac
fi
+case "${target}" in
+ sw_64-*-*)
+cat >>confdefs.h <<_ACEOF
+#define TARGET_SW_64
+_ACEOF
+ ;;
+ esac
case "${enable_default_hash_style}" in
sysv | both) ac_default_emit_sysv_hash=1 ;;
diff --git a/ld/configure.tgt b/ld/configure.tgt
index c62b9581..a9917f62 100644
--- a/ld/configure.tgt
+++ b/ld/configure.tgt
@@ -151,6 +151,22 @@ alpha*-*-openbsd*) targ_emul=elf64alpha
alpha*-*-*vms*) targ_emul=alphavms
targ_extra_ofiles=
;;
+sw_64*-*-linux-* | sw_64*-*-gnu*)
+ case "${targ}" in
+ sw_64sw6a*)
+ targ_emul=elf64sw_64 targ_extra_emuls=sw_64
+ tdir_sw_64=`echo ${targ_alias} | sed -e 's/linux\(-gnu\)*/linux\1ecoff/'` ;;
+ sw_64sw6b*)
+ targ_emul=elf64sw_64 targ_extra_emuls=sw_64
+ tdir_sw_64=`echo ${targ_alias} | sed -e 's/linux\(-gnu\)*/linux\1ecoff/'` ;;
+ sw_64sw8a*)
+ targ_emul=elf64sw_64 targ_extra_emuls=sw_64
+ tdir_sw_64=`echo ${targ_alias} | sed -e 's/linux\(-gnu\)*/linux\1ecoff/'` ;;
+ *)
+ targ_emul=elf64sw_64 targ_extra_emuls=sw_64
+ tdir_sw_64=`echo ${targ_alias} | sed -e 's/linux\(-gnu\)*/linux\1ecoff/'` ;;
+ esac
+ ;;
am33_2.0-*-linux*) targ_emul=elf32am33lin # mn10300 variant
;;
arc*-*-elf*) targ_emul=arcelf
@@ -1147,6 +1163,9 @@ alpha*-*-*)
NATIVE_LIB_DIRS='/usr/local/lib /usr/ccs/lib /lib /usr/lib'
;;
+sw_64*-*-*)
+ NATIVE_LIB_DIRS='/usr/local/lib /usr/ccs/lib /lib /usr/lib'
+ ;;
esac
case "${target}" in
diff --git a/ld/emulparams/elf64sw_64.sh b/ld/emulparams/elf64sw_64.sh
new file mode 100644
index 00000000..fb991baa
--- /dev/null
+++ b/ld/emulparams/elf64sw_64.sh
@@ -0,0 +1,38 @@
+ENTRY=_start
+SCRIPT_NAME=elf
+ELFSIZE=64
+TEMPLATE_NAME=elf
+EXTRA_EM_FILE=sw_64elf
+OUTPUT_FORMAT="elf64-sw_64"
+NO_REL_RELOCS=yes
+TEXT_START_ADDR="0x120000000"
+MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
+COMMONPAGESIZE="CONSTANT (COMMONPAGESIZE)"
+
+# for flags of elf file header
+if [ "$sw_cpu_type" = "sw_64sw6a" ]; then
+ ARCH=sw_64:4
+elif [ "$sw_cpu_type" = "sw_64sw6b" ]; then
+ ARCH=sw_64:8
+elif [ "$sw_cpu_type" = "sw_64sw8a" ]; then
+ ARCH=sw_64:12
+else
+ ARCH=sw_64
+fi
+
+MACHINE=
+GENERATE_SHLIB_SCRIPT=yes
+GENERATE_PIE_SCRIPT=yes
+
+# Yes, we want duplicate .plt sections. The linker chooses the
+# appropriate one magically in sw_64_after_open.
+PLT=".plt ${RELOCATING-0} : SPECIAL { *(.plt) }"
+DATA_PLT=yes
+TEXT_PLT=yes
+
+# Note that the number is always big-endian, thus we have to
+# reverse the digit string.
+NOP=0x5f07ff435f07ff43
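+# (Illustrative decoding, derived from the opcode table in
+# opcodes/sw_64-opc.c: read back as little-endian 32-bit words, each
+# half of this fill pattern is 0x43ff075f, i.e. the all-zero-register
+# "nop" encoding OPR (0x10, 0x3A).)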
+
+OTHER_READONLY_SECTIONS="
+ .reginfo ${RELOCATING-0} : { *(.reginfo) }"
diff --git a/ld/emulparams/elf64sw_64_fbsd.sh b/ld/emulparams/elf64sw_64_fbsd.sh
new file mode 100644
index 00000000..deef0352
--- /dev/null
+++ b/ld/emulparams/elf64sw_64_fbsd.sh
@@ -0,0 +1,3 @@
+source_sh ${srcdir}/emulparams/elf64sw_64.sh
+source_sh ${srcdir}/emulparams/elf_fbsd.sh
+OUTPUT_FORMAT="elf64-sw_64-freebsd"
diff --git a/ld/emulparams/elf64sw_64_nbsd.sh b/ld/emulparams/elf64sw_64_nbsd.sh
new file mode 100644
index 00000000..234df284
--- /dev/null
+++ b/ld/emulparams/elf64sw_64_nbsd.sh
@@ -0,0 +1,2 @@
+source_sh ${srcdir}/emulparams/elf64sw_64.sh
+ENTRY=__start
diff --git a/ld/emulparams/sw_64.sh b/ld/emulparams/sw_64.sh
new file mode 100644
index 00000000..17c49bd7
--- /dev/null
+++ b/ld/emulparams/sw_64.sh
@@ -0,0 +1,3 @@
+SCRIPT_NAME=sw_64
+OUTPUT_FORMAT="ecoff-littlesw_64"
+ARCH=sw_64
diff --git a/ld/emultempl/sw_64elf.em b/ld/emultempl/sw_64elf.em
new file mode 100644
index 00000000..b01baa80
--- /dev/null
+++ b/ld/emultempl/sw_64elf.em
@@ -0,0 +1,152 @@
+# This shell script emits a C file. -*- C -*-
+# Copyright (C) 2003-2023 Free Software Foundation, Inc.
+#
+# This file is part of the GNU Binutils.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+# MA 02110-1301, USA.
+#
+
+# This file is sourced from elf.em, and defines extra sw_64
+# specific routines.
+#
+fragment <<EOF
+
+#include "elf/internal.h"
+#include "elf/sw_64.h"
+#include "elf-bfd.h"
+
+static bool limit_32bit;
+
+extern bool elf64_sw_64_use_secureplt;
+
+
+/* Set the start address as in the Tru64 ld. */
+#define SW_64_TEXT_START_32BIT 0x12000000
+
+static void
+sw_64_after_open (void)
+{
+ if (bfd_get_flavour (link_info.output_bfd) == bfd_target_elf_flavour
+ && elf_object_id (link_info.output_bfd) == SW_64_ELF_DATA)
+ {
+ unsigned int num_plt;
+ lang_output_section_statement_type *os;
+ lang_output_section_statement_type *plt_os[2];
+
+ num_plt = 0;
+ for (os = (void *) lang_os_list.head;
+ os != NULL;
+ os = os->next)
+ {
+ if (os->constraint == SPECIAL && strcmp (os->name, ".plt") == 0)
+ {
+ if (num_plt < 2)
+ plt_os[num_plt] = os;
+ ++num_plt;
+ }
+ }
+
+ if (num_plt == 2)
+ {
+ plt_os[0]->constraint = elf64_sw_64_use_secureplt ? 0 : -1;
+ plt_os[1]->constraint = elf64_sw_64_use_secureplt ? -1 : 0;
+ }
+ }
+
+ gld${EMULATION_NAME}_after_open ();
+}
+
+static void
+sw_64_after_parse (void)
+{
+ link_info.relax_pass = 2;
+ if (limit_32bit
+ && !bfd_link_pic (&link_info)
+ && !bfd_link_relocatable (&link_info))
+ lang_section_start (".interp",
+ exp_binop ('+',
+ exp_intop (SW_64_TEXT_START_32BIT),
+ exp_nameop (SIZEOF_HEADERS, NULL)),
+ NULL);
+
+ ldelf_after_parse ();
+}
+
+static void
+sw_64_before_allocation (void)
+{
+ /* Call main function; we're just extending it. */
+ gld${EMULATION_NAME}_before_allocation ();
+
+ /* Add -relax if -O, not -r, and not explicitly disabled. */
+ if (link_info.optimize
+ && !bfd_link_relocatable (&link_info)
+ && ! RELAXATION_DISABLED_BY_USER)
+ ENABLE_RELAXATION;
+}
+
+static void
+sw_64_finish (void)
+{
+ if (limit_32bit)
+ elf_elfheader (link_info.output_bfd)->e_flags |= EF_SW_64_32BIT;
+
+ finish_default ();
+}
+EOF
+
+# Define some shell vars to insert bits of code into the standard elf
+# parse_args and list_options functions.
+#
+PARSE_AND_LIST_PROLOGUE='
+#define OPTION_TASO 300
+#define OPTION_SECUREPLT (OPTION_TASO + 1)
+#define OPTION_NO_SECUREPLT (OPTION_SECUREPLT + 1)
+'
+
+PARSE_AND_LIST_LONGOPTS='
+ { "taso", no_argument, NULL, OPTION_TASO },
+ { "secureplt", no_argument, NULL, OPTION_SECUREPLT },
+ { "no-secureplt", no_argument, NULL, OPTION_NO_SECUREPLT },
+'
+
+PARSE_AND_LIST_OPTIONS='
+ fprintf (file, _("\
+ --taso Load executable in the lower 31-bit addressable\n\
+ virtual address range.\n\
+ --secureplt Force PLT in text segment.\n\
+ --no-secureplt Force PLT in data segment.\n\
+"));
+'
+
+PARSE_AND_LIST_ARGS_CASES='
+ case OPTION_TASO:
+ limit_32bit = 1;
+ break;
+ case OPTION_SECUREPLT:
+ elf64_sw_64_use_secureplt = true;
+ break;
+ case OPTION_NO_SECUREPLT:
+ elf64_sw_64_use_secureplt = false;
+ break;
+'
+
+# Put these extra sw_64 routines in ld_${EMULATION_NAME}_emulation
+#
+LDEMUL_AFTER_OPEN=sw_64_after_open
+LDEMUL_AFTER_PARSE=sw_64_after_parse
+LDEMUL_BEFORE_ALLOCATION=sw_64_before_allocation
+LDEMUL_FINISH=sw_64_finish
diff --git a/ld/ldlex.h b/ld/ldlex.h
index 87cac021..65f23b54 100644
--- a/ld/ldlex.h
+++ b/ld/ldlex.h
@@ -54,6 +54,7 @@ enum option_values
OPTION_OFORMAT,
OPTION_RELAX,
OPTION_NO_RELAX,
+ OPTION_LBR,
OPTION_NO_SYMBOLIC,
OPTION_RETAIN_SYMBOLS_FILE,
OPTION_RPATH,
diff --git a/ld/lexsup.c b/ld/lexsup.c
index fe872231..6bc26a39 100644
--- a/ld/lexsup.c
+++ b/ld/lexsup.c
@@ -457,6 +457,8 @@ static const struct ld_option ld_options[] =
'\0', NULL, N_("Reduce code size by using target specific optimizations"), TWO_DASHES },
{ {"no-relax", no_argument, NULL, OPTION_NO_RELAX},
'\0', NULL, N_("Do not use relaxation techniques to reduce code size"), TWO_DASHES },
+ { {"fsw-lbr", no_argument, NULL, OPTION_LBR},
+ '\0', NULL, N_("Reduce code size by using target specific optimizations"), ONE_DASH},
{ {"retain-symbols-file", required_argument, NULL,
OPTION_RETAIN_SYMBOLS_FILE},
'\0', N_("FILE"), N_("Keep only symbols listed in FILE"), TWO_DASHES },
@@ -1286,6 +1288,9 @@ parse_args (unsigned argc, char **argv)
case OPTION_RELAX:
ENABLE_RELAXATION;
break;
+ case OPTION_LBR:
+ link_info.flag_sw_lbr = 8;
+ break;
case OPTION_RETAIN_SYMBOLS_FILE:
add_keepsyms_file (optarg);
break;
diff --git a/ld/scripttempl/sw_64.sc b/ld/scripttempl/sw_64.sc
new file mode 100644
index 00000000..56020f37
--- /dev/null
+++ b/ld/scripttempl/sw_64.sc
@@ -0,0 +1,87 @@
+# Linker script for Sw_64 systems.
+# Ian Lance Taylor <ian@cygnus.com>.
+# These variables may be overridden by the emulation file. The
+# defaults are appropriate for an Sw_64 running OSF/1.
+#
+# Copyright (C) 2014-2023 Free Software Foundation, Inc.
+#
+# Copying and distribution of this file, with or without modification,
+# are permitted in any medium without royalty provided the copyright
+# notice and this notice are preserved.
+
+test -z "$ENTRY" && ENTRY=__start
+test -z "$TEXT_START_ADDR" && TEXT_START_ADDR="0x120000000 + SIZEOF_HEADERS"
+if test "x$LD_FLAG" = "xn" -o "x$LD_FLAG" = "xN"; then
+ DATA_ADDR=.
+else
+ test -z "$DATA_ADDR" && DATA_ADDR=0x140000000
+fi
+cat <<EOF
+/* Copyright (C) 2014-2023 Free Software Foundation, Inc.
+
+ Copying and distribution of this script, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. */
+
+OUTPUT_FORMAT("${OUTPUT_FORMAT}")
+${LIB_SEARCH_DIRS}
+
+${RELOCATING+ENTRY (${ENTRY})}
+
+SECTIONS
+{
+ ${RELOCATING+. = ${TEXT_START_ADDR};}
+ .text : {
+ ${RELOCATING+ _ftext = .;}
+ ${RELOCATING+ __istart = .;}
+ ${RELOCATING+ KEEP (*(SORT_NONE(.init)))}
+ ${RELOCATING+ LONG (0x6bfa8001)}
+ ${RELOCATING+ eprol = .;}
+ *(.text)
+ ${RELOCATING+ __fstart = .;}
+ ${RELOCATING+ KEEP (*(SORT_NONE(.fini)))}
+ ${RELOCATING+ LONG (0x6bfa8001)}
+ ${RELOCATING+ _etext = .;}
+ }
+ .rdata : {
+ *(.rdata)
+ }
+ .rconst : {
+ *(.rconst)
+ }
+ .pdata : {
+ ${RELOCATING+ _fpdata = .;}
+ *(.pdata)
+ }
+ ${RELOCATING+. = ${DATA_ADDR};}
+ .data : {
+ ${RELOCATING+ _fdata = .;}
+ *(.data)
+ ${CONSTRUCTING+CONSTRUCTORS}
+ }
+ .xdata : {
+ *(.xdata)
+ }
+ ${RELOCATING+ _gp = ALIGN (16) + 0x8000;}
+ .lit8 : {
+ *(.lit8)
+ }
+ .lita : {
+ *(.lita)
+ }
+ .sdata : {
+ *(.sdata)
+ }
+ ${RELOCATING+ _EDATA = .;}
+ ${RELOCATING+ _FBSS = .;}
+ .sbss : {
+ *(.sbss)
+ ${RELOCATING+*(.scommon)}
+ }
+ .bss : {
+ *(.bss)
+ ${RELOCATING+*(COMMON)}
+ }
+ ${RELOCATING+ _end = .;}
+}
+EOF
diff --git a/ld/testsuite/ld-elf/elf.exp b/ld/testsuite/ld-elf/elf.exp
index ca345e86..4d5477a7 100644
--- a/ld/testsuite/ld-elf/elf.exp
+++ b/ld/testsuite/ld-elf/elf.exp
@@ -51,7 +51,7 @@ if [istarget "*-*-hpux*"] {
set ASFLAGS "$ASFLAGS --defsym HPUX=1"
}
-if { [istarget alpha*-*-* ] } {
+if { [istarget alpha*-*-* ] || [istarget sw_64*-*-*]} {
# The compress1 test is written expecting 32-bit addresses; force the
# executable down into the low address space to match.
# ??? How can we adjust just the one testcase?
diff --git a/ld/testsuite/ld-elf/pr17550a.d b/ld/testsuite/ld-elf/pr17550a.d
index 752c3ad6..33581821 100644
--- a/ld/testsuite/ld-elf/pr17550a.d
+++ b/ld/testsuite/ld-elf/pr17550a.d
@@ -3,7 +3,7 @@
#ld: -r
#readelf: -s --wide
# Disabled on alpha because alpha has a different .set directive.
-#xfail: alpha-*-*
+#xfail: alpha-*-* sw_64-*-*
#failif
#...
diff --git a/ld/testsuite/ld-elf/pr17550b.d b/ld/testsuite/ld-elf/pr17550b.d
index 09e7ddfe..bf80e9fa 100644
--- a/ld/testsuite/ld-elf/pr17550b.d
+++ b/ld/testsuite/ld-elf/pr17550b.d
@@ -3,7 +3,7 @@
#ld: -r
#readelf: -s --wide
# Disabled on alpha because alpha has a different .set directive.
-#xfail: alpha-*-*
+#xfail: alpha-*-* sw_64-*-*
#failif
#...
diff --git a/ld/testsuite/ld-elf/pr17550c.d b/ld/testsuite/ld-elf/pr17550c.d
index 55749fc7..97c394c1 100644
--- a/ld/testsuite/ld-elf/pr17550c.d
+++ b/ld/testsuite/ld-elf/pr17550c.d
@@ -5,3 +5,6 @@
#xfail: alpha-*-* [is_generic]
# Disabled on alpha because alpha has a different .set directive.
# Generic linker targets don't support comdat group sections.
+#xfail: sw_64-*-* [is_generic]
+# Disabled on sw_64 because sw_64 has a different .set directive.
+# Generic linker targets don't support comdat group sections.
diff --git a/ld/testsuite/ld-elf/pr17550d.d b/ld/testsuite/ld-elf/pr17550d.d
index 3dd108d5..f0b2ea72 100644
--- a/ld/testsuite/ld-elf/pr17550d.d
+++ b/ld/testsuite/ld-elf/pr17550d.d
@@ -5,6 +5,9 @@
# Disabled on alpha because alpha has a different .set directive.
# Generic linker targets don't support comdat group sections.
#xfail: alpha-*-* [is_generic]
+# Disabled on sw_64 because sw_64 has a different .set directive.
+# Generic linker targets don't support comdat group sections.
+#xfail: sw_64-*-* [is_generic]
#...
+[0-9]+: +[0-9a-f]+ +0 +OBJECT +GLOBAL +DEFAULT +UND y
diff --git a/ld/testsuite/ld-elf/shared.exp b/ld/testsuite/ld-elf/shared.exp
index cf010e5b..094cc735 100644
--- a/ld/testsuite/ld-elf/shared.exp
+++ b/ld/testsuite/ld-elf/shared.exp
@@ -347,6 +347,7 @@ if { [check_gc_sections_available] } {
tic6x-*-* { }
xtensa-*-* { }
loongarch*-*-* { }
+ sw_64-*-* { }
default {
run_ld_link_tests [list \
[list \
@@ -466,7 +467,7 @@ run_ld_link_tests [list \
]
# These targets don't copy dynamic variables into .bss.
-setup_xfail "alpha-*-*" "bfin-*-*" "ia64-*-*" "xtensa-*-*" *loongarch*-*-*
+setup_xfail "alpha-*-*" "sw_64-*-*" "bfin-*-*" "ia64-*-*" "xtensa-*-*" *loongarch*-*-*
# or don't have .data.rel.ro
setup_xfail "hppa*64*-*-hpux*" "tic6x-*-*"
# or complain about relocs in read-only sections
@@ -487,7 +488,7 @@ run_ld_link_tests [list \
] {![check_relro_support]}
# LoongArch: Read-only sections, merged into text segment in normal exe.
-setup_xfail alpha-*-* xtensa-*-* loongarch*-*-*
+setup_xfail alpha-*-* sw_64-*-* xtensa-*-* loongarch*-*-*
run_ld_link_tests [list \
[list \
"pr20995-2" \
@@ -503,7 +504,7 @@ run_ld_link_tests [list \
# The next test checks that copy relocs are not used unnecessarily,
# but that is just an optimization so don't complain loudly.
setup_xfail *-*-*
-clear_xfail alpha-*-* bfin-*-linux* csky-*-* frv-*-* hppa*-*-* i?86-*-*
+clear_xfail alpha-*-* sw_64-*-* bfin-*-linux* csky-*-* frv-*-* hppa*-*-* i?86-*-*
clear_xfail ia64-*-* loongarch*-*-* microblaze-*-* powerpc*-*-* x86_64-*-*
clear_xfail xtensa-*-*
run_ld_link_tests {
@@ -914,7 +915,7 @@ run_cc_link_tests [list \
]
# pr19073.s uses .set, which has a different meaning on alpha.
-if { ![istarget alpha-*-*] } {
+if { ![istarget alpha-*-*] && ![istarget sw_64-*-*] } {
append build_tests {
{"Build pr19073a.o"
"-r -nostdlib -z noexecstack" ""
diff --git a/ld/testsuite/ld-ifunc/ifunc.exp b/ld/testsuite/ld-ifunc/ifunc.exp
index edabcc3f..f0026aaf 100644
--- a/ld/testsuite/ld-ifunc/ifunc.exp
+++ b/ld/testsuite/ld-ifunc/ifunc.exp
@@ -25,6 +25,7 @@
if { ![is_elf_format] || ![supports_gnu_osabi]
|| [istarget alpha-*-*]
+ || [istarget sw_64*-*-*]
|| [istarget arc*-*-*]
|| [istarget am33*-*-*]
|| [istarget bfin-*-*]
diff --git a/ld/testsuite/ld-shared/shared.exp b/ld/testsuite/ld-shared/shared.exp
index f56e42f6..38a08e23 100644
--- a/ld/testsuite/ld-shared/shared.exp
+++ b/ld/testsuite/ld-shared/shared.exp
@@ -56,6 +56,7 @@ if { ![istarget hppa*64*-*-hpux*] \
&& ![istarget sparc*-*-linux*] \
&& ![istarget arm*-*-linux*] \
&& ![istarget alpha*-*-linux*] \
+ && ![istarget sw_64*-*-linux*] \
&& ![is_xcoff_format] \
&& ![istarget s390*-*-linux*] \
&& ![istarget aarch64*-*-linux*] \
@@ -226,6 +227,7 @@ if ![ld_compile "$CC_FOR_TARGET $SHCFLAG" $srcdir/$subdir/main.c $tmpdir/mainnp.
setup_xfail "*-*-solaris2*"
setup_xfail "ia64-*-linux*"
setup_xfail "alpha*-*-linux*"
+ setup_xfail "sw_64*-*-linux*"
setup_xfail "powerpc64*-*-*"
if { ![istarget hppa*64*-*-linux*] } {
setup_xfail "hppa*-*-linux*"
@@ -252,6 +254,7 @@ if ![ld_compile "$CC_FOR_TARGET $SHCFLAG" $srcdir/$subdir/main.c $tmpdir/mainnp.
setup_xfail "powerpc*-*-linux*"
setup_xfail "ia64-*-linux*"
setup_xfail "alpha*-*-linux*"
+ setup_xfail "sw_64*-*-linux*"
setup_xfail "mips*-*-linux*"
if { ![istarget hppa*64*-*-linux*] } {
setup_xfail "hppa*-*-linux*"
@@ -307,6 +310,7 @@ if ![ld_compile "$CC_FOR_TARGET $SHCFLAG $picflag" $srcdir/$subdir/main.c $tmpdi
setup_xfail "*-*-solaris2*"
setup_xfail "ia64-*-linux*"
setup_xfail "alpha*-*-linux*"
+ setup_xfail "sw_64*-*-linux*"
setup_xfail "powerpc64*-*-*"
if { ![istarget hppa*64*-*-linux*] } {
setup_xfail "hppa*-*-linux*"
diff --git a/ld/testsuite/ld-srec/srec.exp b/ld/testsuite/ld-srec/srec.exp
index 466b57ea..857dc748 100644
--- a/ld/testsuite/ld-srec/srec.exp
+++ b/ld/testsuite/ld-srec/srec.exp
@@ -412,6 +412,8 @@ setup_xfail "v850*-*-elf"
# The S-record linker doesn't handle Alpha Elf relaxation.
setup_xfail "alpha*-*-elf*" "alpha*-*-linux-*" "alpha*-*-gnu*"
setup_xfail "alpha*-*-netbsd*"
+setup_xfail "sw_64*-*-elf*" "sw_64*-*-linux-*" "sw_64*-*-gnu*"
+setup_xfail "sw_64*-*-netbsd*"
# The S-record linker hasn't any hope of coping with HPPA relocs.
# Or MeP complex relocs.
@@ -473,6 +475,8 @@ setup_xfail "arm*-*-*"
setup_xfail "v850*-*-elf"
setup_xfail "alpha*-*-elf*" "alpha*-*-linux-*" "alpha*-*-gnu*"
setup_xfail "alpha*-*-netbsd*"
+setup_xfail "sw_64*-*-elf*" "sw_64*-*-linux-*" "sw_64*-*-gnu*"
+setup_xfail "sw_64*-*-netbsd*"
setup_xfail "hppa*-*-*" "mep-*-*"
setup_xfail "ia64-*-*"
setup_xfail "*-*-cygwin*" "*-*-mingw*" "*-*-pe*" "*-*-winnt*"
diff --git a/ld/testsuite/lib/ld-lib.exp b/ld/testsuite/lib/ld-lib.exp
index 91846406..72e689d3 100644
--- a/ld/testsuite/lib/ld-lib.exp
+++ b/ld/testsuite/lib/ld-lib.exp
@@ -1096,6 +1096,7 @@ proc check_gc_sections_available { } {
# advertised by ld's options.
if { [istarget alpha-*-*]
|| [istarget bpf-*-*]
+ || [istarget sw_64-*-*]
|| [istarget d30v-*-*]
|| [istarget dlx-*-*]
|| [istarget hppa*64-*-*]
diff --git a/makefile.vms b/makefile.vms
index a9e36340..2b921703 100644
--- a/makefile.vms
+++ b/makefile.vms
@@ -15,6 +15,14 @@ CC = cc
GASCC = gcc
endif
+ifeq ($(ARCH),SW_64)
+CC = gcc
+GASCC = $(CC)
+else
+CC = cc
+GASCC = gcc
+endif
+
ifeq ($(CC),cc)
CHECK-COMPILER = check_compiler
else
diff --git a/opcodes/Makefile.am b/opcodes/Makefile.am
index 578fdc05..a852edbf 100644
--- a/opcodes/Makefile.am
+++ b/opcodes/Makefile.am
@@ -94,6 +94,8 @@ TARGET64_LIBOPCODES_CFILES = \
aarch64-opc-2.c \
alpha-dis.c \
alpha-opc.c \
+ sw_64-dis.c \
+ sw_64-opc.c \
bpf-asm.c \
bpf-desc.c \
bpf-dis.c \
diff --git a/opcodes/Makefile.in b/opcodes/Makefile.in
index 2db307e8..9d5b13bf 100644
--- a/opcodes/Makefile.in
+++ b/opcodes/Makefile.in
@@ -486,6 +486,8 @@ TARGET64_LIBOPCODES_CFILES = \
aarch64-opc-2.c \
alpha-dis.c \
alpha-opc.c \
+ sw_64-dis.c \
+ sw_64-opc.c \
bpf-asm.c \
bpf-desc.c \
bpf-dis.c \
@@ -877,6 +879,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aarch64-opc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/alpha-dis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/alpha-opc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sw_64-opc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sw_64-dis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc-dis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc-ext.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc-opc.Plo@am__quote@
diff --git a/opcodes/config.in b/opcodes/config.in
index 4cd636e5..ed69eba0 100644
--- a/opcodes/config.in
+++ b/opcodes/config.in
@@ -120,3 +120,5 @@
/* Define to 1 if you need to in order for `stat' and other things to work. */
#undef _POSIX_SOURCE
+
+#undef TARGET_SW_64
diff --git a/opcodes/configure b/opcodes/configure
index fa81f1d6..f0634d62 100755
--- a/opcodes/configure
+++ b/opcodes/configure
@@ -12515,6 +12515,13 @@ if test x${all_targets} = xfalse ; then
case "$arch" in
bfd_aarch64_arch) ta="$ta aarch64-asm.lo aarch64-dis.lo aarch64-opc.lo aarch64-asm-2.lo aarch64-dis-2.lo aarch64-opc-2.lo" ;;
bfd_alpha_arch) ta="$ta alpha-dis.lo alpha-opc.lo" ;;
+# ifdef TARGET_SW_64
+ bfd_sw_64_arch) ta="$ta sw_64-dis.lo sw_64-opc.lo"
+ cat >>confdefs.h <<_ACEOF
+#define TARGET_SW_64
+_ACEOF
+# endif
+ ;;
bfd_amdgcn_arch) ;;
bfd_arc_arch) ta="$ta arc-dis.lo arc-opc.lo arc-ext.lo" ;;
bfd_arm_arch) ta="$ta arm-dis.lo" ;;
diff --git a/opcodes/configure.ac b/opcodes/configure.ac
index 1beb72e8..edf691b9 100644
--- a/opcodes/configure.ac
+++ b/opcodes/configure.ac
@@ -263,6 +263,7 @@ if test x${all_targets} = xfalse ; then
case "$arch" in
bfd_aarch64_arch) ta="$ta aarch64-asm.lo aarch64-dis.lo aarch64-opc.lo aarch64-asm-2.lo aarch64-dis-2.lo aarch64-opc-2.lo" ;;
bfd_alpha_arch) ta="$ta alpha-dis.lo alpha-opc.lo" ;;
+ bfd_sw_64_arch) ta="$ta sw_64-dis.lo sw_64-opc.lo" ;;
bfd_amdgcn_arch) ;;
bfd_arc_arch) ta="$ta arc-dis.lo arc-opc.lo arc-ext.lo" ;;
bfd_arm_arch) ta="$ta arm-dis.lo" ;;
diff --git a/opcodes/configure.com b/opcodes/configure.com
index 1fd2741f..c6a2afda 100644
--- a/opcodes/configure.com
+++ b/opcodes/configure.com
@@ -44,6 +44,14 @@ $ FILES="alpha-dis,alpha-opc"
$ DEFS="""ARCH_alpha"""
$EOD
$ endif
+$ if arch.eqs."sw_64"
+$ then
+$ create build.com
+$DECK
+$ FILES="sw_64-dis,sw_64-opc"
+$ DEFS="""ARCH_sw_64"""
+$EOD
+$ endif
$!
$ append sys$input build.com
$DECK
diff --git a/opcodes/disassemble.c b/opcodes/disassemble.c
index 7a4a641c..3db14bca 100644
--- a/opcodes/disassemble.c
+++ b/opcodes/disassemble.c
@@ -27,6 +27,9 @@
#ifdef BFD64
#define ARCH_aarch64
#define ARCH_alpha
+#ifdef TARGET_SW_64
+#define ARCH_sw_64
+#endif
#define ARCH_bpf
#define ARCH_ia64
#define ARCH_loongarch
@@ -147,6 +150,13 @@ disassembler (enum bfd_architecture a,
disassemble = print_insn_alpha;
break;
#endif
+#ifdef TARGET_SW_64
+#ifdef ARCH_sw_64
+ case bfd_arch_sw_64:
+ disassemble = print_insn_sw_64;
+ break;
+#endif
+#endif
#ifdef ARCH_arc
case bfd_arch_arc:
disassemble = arc_get_disassembler (abfd);
diff --git a/opcodes/disassemble.h b/opcodes/disassemble.h
index b7474a85..06a38a66 100644
--- a/opcodes/disassemble.h
+++ b/opcodes/disassemble.h
@@ -23,6 +23,9 @@
extern int print_insn_aarch64 (bfd_vma, disassemble_info *);
extern int print_insn_alpha (bfd_vma, disassemble_info *);
+#ifdef TARGET_SW_64
+extern int print_insn_sw_64 (bfd_vma, disassemble_info *);
+#endif
extern int print_insn_avr (bfd_vma, disassemble_info *);
extern int print_insn_bfin (bfd_vma, disassemble_info *);
extern int print_insn_big_arm (bfd_vma, disassemble_info *);
diff --git a/opcodes/sw_64-dis.c b/opcodes/sw_64-dis.c
new file mode 100644
index 00000000..7e139239
--- /dev/null
+++ b/opcodes/sw_64-dis.c
@@ -0,0 +1,238 @@
+/* sw_64-dis.c -- Disassemble Sw_64 AXP instructions
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@tamu.edu>,
+ patterned after the PPC opcode handling written by Ian Lance Taylor.
+
+ This file is part of libopcodes.
+
+ This library is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ It is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this file; see the file COPYING. If not, write to the Free
+ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#include "sysdep.h"
+#include <stdio.h>
+#include "disassemble.h"
+#include "opcode/sw_64.h"
+
+/* OSF register names. */
+
+static const char *const osf_regnames[64]
+ = {"$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7",
+ "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "fp",
+ "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23",
+ "$r24", "$r25", "ra", "$r27", "$r28", "$r29", "sp", "$r31",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"};
+
+/* VMS register names. */
+
+static const char *const vms_regnames[64]
+ = {"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "R9",
+ "R10", "R11", "R12", "R13", "R14", "R15", "R16", "R17", "R18", "R19",
+ "R20", "R21", "R22", "R23", "R24", "AI", "RA", "PV", "AT", "FP",
+ "SP", "RZ", "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
+ "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15", "F16", "F17",
+ "F18", "F19", "F20", "F21", "F22", "F23", "F24", "F25", "F26", "F27",
+ "F28", "F29", "F30", "FZ"};
+
+/* Disassemble Sw_64 instructions. */
+
+int
+print_insn_sw_64 (bfd_vma memaddr, struct disassemble_info *info)
+{
+ static const struct sw_64_opcode *opcode_index[AXP_NOPS + 1];
+ const char *const *regnames;
+ const struct sw_64_opcode *opcode, *opcode_end;
+ const unsigned char *opindex;
+ unsigned insn, op, isa_mask;
+ int need_comma;
+
+ /* Initialize the majorop table the first time through. */
+ if (!opcode_index[0])
+ {
+ opcode = sw_64_opcodes;
+ opcode_end = opcode + sw_64_num_opcodes;
+
+ for (op = 0; op < AXP_NOPS; ++op)
+ {
+ opcode_index[op] = opcode;
+ if ((AXP_LITOP (opcode->opcode) != 0x10)
+ && (AXP_LITOP (opcode->opcode) != 0x11))
+ {
+ while (opcode < opcode_end && op == AXP_OP (opcode->opcode))
+ ++opcode;
+ }
+ else
+ {
+ while (opcode < opcode_end && op == AXP_LITOP (opcode->opcode))
+ ++opcode;
+ }
+ }
+ opcode_index[op] = opcode;
+ }
+
+ if (info->flavour == bfd_target_evax_flavour)
+ regnames = vms_regnames;
+ else
+ regnames = osf_regnames;
+
+ isa_mask = AXP_OPCODE_NOPAL;
+ switch (info->mach)
+ {
+ case bfd_mach_sw_64_sw6a:
+ isa_mask |= AXP_OPCODE_SW6 | AXP_OPCODE_SW6A;
+ break;
+ case bfd_mach_sw_64_sw6b:
+ isa_mask |= AXP_OPCODE_SW6 | AXP_OPCODE_SW6B;
+ break;
+ case bfd_mach_sw_64_sw8a:
+ isa_mask |= AXP_OPCODE_SW6 | AXP_OPCODE_SW8A;
+ break;
+ }
+
+ /* Read the insn into a host word. */
+ {
+ bfd_byte buffer[4];
+ int status = (*info->read_memory_func) (memaddr, buffer, 4, info);
+ if (status != 0)
+ {
+ (*info->memory_error_func) (status, memaddr, info);
+ return -1;
+ }
+ insn = bfd_getl32 (buffer);
+ }
+
+ /* Get the major opcode of the instruction. */
+
+ if ((AXP_LITOP (insn) == 0x10) || (AXP_LITOP (insn) == 0x11))
+ op = AXP_LITOP (insn);
+ else if ((AXP_OP (insn) & 0x3C) == 0x14) // logx
+ op = 0x14;
+ else
+ op = AXP_OP (insn);
+
+ /* Find the first match in the opcode table. */
+ opcode_end = opcode_index[op + 1];
+ for (opcode = opcode_index[op]; opcode < opcode_end; ++opcode)
+ {
+ if ((insn ^ opcode->opcode) & opcode->mask)
+ continue;
+
+ if (!(opcode->flags & isa_mask))
+ continue;
+
+ /* Make two passes over the operands. First see if any of them
+ have extraction functions, and, if they do, make sure the
+ instruction is valid. */
+ {
+ int invalid = 0;
+ for (opindex = opcode->operands; *opindex != 0; opindex++)
+ {
+ const struct sw_64_operand *operand = sw_64_operands + *opindex;
+ if (operand->extract)
+ (*operand->extract) (insn, &invalid);
+ }
+ if (invalid)
+ continue;
+ }
+
+ /* The instruction is valid. */
+ goto found;
+ }
+
+ /* No instruction found. */
+ (*info->fprintf_func) (info->stream, ".long %#08x", insn);
+
+ return 4;
+
+found:
+ if (!strncmp ("sys_call", opcode->name, 8))
+ {
+ if (insn & (0x1 << 25))
+ (*info->fprintf_func) (info->stream, "%s", "sys_call");
+ else
+ (*info->fprintf_func) (info->stream, "%s", "sys_call/b");
+ }
+ else
+ (*info->fprintf_func) (info->stream, "%s", opcode->name);
+
+ /* Get zz[7:6] and zz[5:0] to form truth for vlog. */
+ if (!strcmp (opcode->name, "vlog"))
+ {
+ unsigned int truth;
+ char tr[4];
+ truth = (AXP_OP (insn) & 3) << 6;
+ truth = truth | ((insn & 0xFC00) >> 10);
+ sprintf (tr, "%x", truth);
+ (*info->fprintf_func) (info->stream, "%s", tr);
+ }
+ if (opcode->operands[0] != 0)
+ (*info->fprintf_func) (info->stream, "\t");
+
+ /* Now extract and print the operands. */
+ need_comma = 0;
+ for (opindex = opcode->operands; *opindex != 0; opindex++)
+ {
+ const struct sw_64_operand *operand = sw_64_operands + *opindex;
+ int value;
+
+ /* Operands that are marked FAKE are simply ignored. We
+ already made sure that the extract function considered
+ the instruction to be valid. */
+ if ((operand->flags & AXP_OPERAND_FAKE) != 0)
+ continue;
+
+ /* Extract the value from the instruction. */
+ if (operand->extract)
+ value = (*operand->extract) (insn, (int *) NULL);
+ else
+ {
+ value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
+ if (operand->flags & AXP_OPERAND_SIGNED)
+ {
+ int signbit = 1 << (operand->bits - 1);
+ value = (value ^ signbit) - signbit;
+ }
+ }
+
+ if (need_comma
+ && ((operand->flags & (AXP_OPERAND_PARENS | AXP_OPERAND_COMMA))
+ != AXP_OPERAND_PARENS))
+ {
+ (*info->fprintf_func) (info->stream, ",");
+ }
+ if (operand->flags & AXP_OPERAND_PARENS)
+ (*info->fprintf_func) (info->stream, "(");
+
+ /* Print the operand as directed by the flags. */
+ if (operand->flags & AXP_OPERAND_IR)
+ (*info->fprintf_func) (info->stream, "%s", regnames[value]);
+ else if (operand->flags & AXP_OPERAND_FPR)
+ (*info->fprintf_func) (info->stream, "%s", regnames[value + 32]);
+ else if (operand->flags & AXP_OPERAND_RELATIVE)
+ (*info->print_address_func) (memaddr + 4 + value, info);
+ else if (operand->flags & AXP_OPERAND_SIGNED)
+ (*info->fprintf_func) (info->stream, "%d", value);
+ else
+ (*info->fprintf_func) (info->stream, "%#x", value);
+
+ if (operand->flags & AXP_OPERAND_PARENS)
+ (*info->fprintf_func) (info->stream, ")");
+ need_comma = 1;
+ }
+
+ return 4;
+}
diff --git a/opcodes/sw_64-opc.c b/opcodes/sw_64-opc.c
new file mode 100644
index 00000000..f770919e
--- /dev/null
+++ b/opcodes/sw_64-opc.c
@@ -0,0 +1,1398 @@
+/* sw_64-opc.c -- Sw_64 AXP opcode list
+ Copyright (C) 1996-2023 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@cygnus.com>,
+ patterned after the PPC opcode handling written by Ian Lance Taylor.
+
+ This file is part of libopcodes.
+
+ This library is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ It is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this file; see the file COPYING. If not, write to the
+ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#include "sysdep.h"
+#include <stdio.h>
+#include "opcode/sw_64.h"
+#include "bfd.h"
+#include "opintl.h"
+
+/* This file holds the Sw_64 AXP opcode table. The opcode table includes
+ almost all of the extended instruction mnemonics. This permits the
+ disassembler to use them, and simplifies the assembler logic, at the
+ cost of increasing the table size. The table is strictly constant
+ data, so the compiler should be able to put it in the text segment.
+
+ This file also holds the operand table. All knowledge about inserting
+ and extracting operands from instructions is kept in this file.
+
+ The information for the base instruction set was compiled from the
+ _Sw_64 Architecture Handbook_, Digital Order Number EC-QD2KB-TE,
+ version 2.
+ */
+
+/* The RB field when it is the same as the RA field in the same insn.
+ This operand is marked fake. The insertion function just copies
+ the RA field into the RB field, and the extraction function just
+ checks that the fields are the same. */
+
+static unsigned
+insert_rba (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | (((insn >> 21) & 0x1f) << 16);
+}
+
+static int
+extract_rba (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
+ *invalid = 1;
+ return 0;
+}
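+
+/* Worked example (illustrative): with RA = 5, insert_rba ORs 5 << 16
+ into the word so that bits <20:16> mirror bits <25:21>; on the
+ disassembly side, extract_rba flags the word invalid whenever the
+ two fields differ. */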
+
+/* The same for the RC field. */
+
+static unsigned
+insert_rca (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | ((insn >> 21) & 0x1f);
+}
+
+static int
+extract_rca (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != (insn & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+static unsigned
+insert_rdc (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | ((insn >> 5) & 0x1f);
+}
+
+static int
+extract_rdc (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 5) & 0x1f) != (insn & 0x1f))
+ *invalid = 1;
+ return 0;
+}
+
+/* Fake arguments in which the registers must be set to ZERO. */
+
+static unsigned
+insert_za (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | (31 << 21);
+}
+
+static int
+extract_za (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+static unsigned
+insert_zb (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | (31 << 16);
+}
+
+static int
+extract_zb (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 16) & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+static unsigned
+insert_zc (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | 31;
+}
+
+static int
+extract_zc (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && (insn & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+static unsigned
+insert_zc2 (unsigned insn, int value ATTRIBUTE_UNUSED,
+ const char **errmsg ATTRIBUTE_UNUSED)
+{
+ return insn | 31 << 5;
+}
+
+static int
+extract_zc2 (unsigned insn, int *invalid)
+{
+ if (invalid != (int *) NULL && ((insn >> 5) & 0x1f) != 31)
+ *invalid = 1;
+ return 0;
+}
+
+/* The displacement field of a Branch format insn. */
+
+static unsigned
+insert_bdisp (unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **) NULL && (value & 3))
+ *errmsg = _ ("branch operand unaligned");
+ return insn | ((value / 4) & 0x1FFFFF);
+}
+
+static int
+extract_bdisp (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0x1FFFFF) ^ 0x100000) - 0x100000);
+}
+
+static unsigned
+insert_bdisp26 (unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **) NULL && (value & 3))
+ *errmsg = _ ("branch operand unaligned");
+ return insn | ((value / 4) & 0x3FFFFFF);
+}
+
+static int
+extract_bdisp26 (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0x3FFFFFF) ^ 0x2000000) - 0x2000000);
+}
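+
+/* Worked example of the sign-extension pattern above (illustrative):
+ a 21-bit BDISP field of 0x1FFFFC decodes as
+ (0x1FFFFC ^ 0x100000) - 0x100000 = -4 words, i.e. -16 bytes, while
+ 0x000004 decodes as +4 words, i.e. +16 bytes. BDISP26 works the
+ same way with 0x2000000 as the sign bit. */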
+
+/* The hint field of a JMP/JSR insn. */
+
+/* sw uses a 16-bit hint disp. */
+static unsigned
+insert_jhint (unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **) NULL && (value & 3))
+ *errmsg = _ ("jump hint unaligned");
+ return insn | ((value / 4) & 0xFFFF);
+}
+
+static int
+extract_jhint (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0xFFFF) ^ 0x8000) - 0x8000);
+}
+
+/* The hint field of an SW6 HW_JMP/JSR insn. */
+
+static unsigned
+insert_sw6hwjhint (unsigned insn, int value, const char **errmsg)
+{
+ if (errmsg != (const char **) NULL && (value & 3))
+ *errmsg = _ ("jump hint unaligned");
+ return insn | ((value / 4) & 0x1FFF);
+}
+
+static int
+extract_sw6hwjhint (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
+{
+ return 4 * (((insn & 0x1FFF) ^ 0x1000) - 0x1000);
+}
+
+/* The operands table. */
+
+const struct sw_64_operand sw_64_operands[] = {
+/* The fields are bits, shift, default reloc, flags, insert, extract. */
+/* The zero index is used to indicate end-of-list. */
+#define UNUSED 0
+ {0, 0, 0, 0, 0, 0},
+
+/* The plain integer register fields. */
+#define RA (UNUSED + 1)
+ {5, 21, 0, AXP_OPERAND_IR, 0, 0},
+#define RB (RA + 1)
+ {5, 16, 0, AXP_OPERAND_IR, 0, 0},
+#define RC (RB + 1)
+ {5, 0, 0, AXP_OPERAND_IR, 0, 0},
+
+/* The plain fp register fields. */
+#define FA (RC + 1)
+ {5, 21, 0, AXP_OPERAND_FPR, 0, 0},
+#define FB (FA + 1)
+ {5, 16, 0, AXP_OPERAND_FPR, 0, 0},
+#define FC (FB + 1)
+ {5, 0, 0, AXP_OPERAND_FPR, 0, 0},
+
+/* The integer registers when they are ZERO. */
+#define ZA (FC + 1)
+ {5, 21, 0, AXP_OPERAND_FAKE, insert_za, extract_za},
+#define ZB (ZA + 1)
+ {5, 16, 0, AXP_OPERAND_FAKE, insert_zb, extract_zb},
+#define ZC (ZB + 1)
+ {5, 0, 0, AXP_OPERAND_FAKE, insert_zc, extract_zc},
+
+/* The RB field when it needs parentheses. */
+#define PRB (ZC + 1)
+ {5, 16, 0, AXP_OPERAND_IR | AXP_OPERAND_PARENS, 0, 0},
+
+/* The RB field when it needs parentheses _and_ a preceding comma. */
+#define CPRB (PRB + 1)
+ {5, 16, 0, AXP_OPERAND_IR | AXP_OPERAND_PARENS | AXP_OPERAND_COMMA, 0, 0},
+
+/* The RB field when it must be the same as the RA field. */
+#define RBA (CPRB + 1)
+ {5, 16, 0, AXP_OPERAND_FAKE, insert_rba, extract_rba},
+
+/* The RC field when it must be the same as the RB field. */
+#define RCA (RBA + 1)
+ {5, 0, 0, AXP_OPERAND_FAKE, insert_rca, extract_rca},
+
+#define RDC (RCA + 1)
+ {5, 0, 0, AXP_OPERAND_FAKE, insert_rdc, extract_rdc},
+
+/* The RC field when it can *default* to RA. */
+#define DRC1 (RDC + 1)
+ {5, 0, 0, AXP_OPERAND_IR | AXP_OPERAND_DEFAULT_FIRST, 0, 0},
+
+/* The RC field when it can *default* to RB. */
+#define DRC2 (DRC1 + 1)
+ {5, 0, 0, AXP_OPERAND_IR | AXP_OPERAND_DEFAULT_SECOND, 0, 0},
+
+/* The RD field when it can *default* to RC. */
+#define DRC3 (DRC2 + 1)
+ {5, 0, 0, AXP_OPERAND_IR | AXP_OPERAND_DEFAULT_THIRD, 0, 0},
+
+/* The FC field when it can *default* to RA. */
+#define DFC1 (DRC3 + 1)
+ {5, 0, 0, AXP_OPERAND_FPR | AXP_OPERAND_DEFAULT_FIRST, 0, 0},
+
+/* The FC field when it can *default* to RB. */
+#define DFC2 (DFC1 + 1)
+ {5, 0, 0, AXP_OPERAND_FPR | AXP_OPERAND_DEFAULT_SECOND, 0, 0},
+
+/* The FD field when it can *default* to FC. */
+#define DFC3 (DFC2 + 1)
+ {5, 0, 0, AXP_OPERAND_FPR | AXP_OPERAND_DEFAULT_THIRD, 0, 0},
+
+/* The unsigned 8-bit literal of Operate format insns. */
+#define LIT (DFC3 + 1)
+ {8, 13, -LIT, AXP_OPERAND_UNSIGNED, 0, 0},
+
+/* The signed 16-bit displacement of Memory format insns. From here
+ we can't tell what relocation should be used, so don't use a default. */
+#define MDISP (LIT + 1)
+ {16, 0, -MDISP, AXP_OPERAND_SIGNED, 0, 0},
+
+/* The signed "23-bit" aligned displacement of Branch format insns. */
+#define BDISP (MDISP + 1)
+ {21, 0, BFD_RELOC_23_PCREL_S2, AXP_OPERAND_RELATIVE, insert_bdisp,
+ extract_bdisp},
+
+/* The 25-bit PALcode function. */
+#define PALFN (BDISP + 1)
+ {25, 0, -PALFN, AXP_OPERAND_UNSIGNED, 0, 0},
+
+/* sw jsr/ret instructions have no function bits. */
+/* The optional signed "16-bit" aligned displacement of the JMP/JSR hint. */
+#define JMPHINT (PALFN + 1)
+ {16, 0, BFD_RELOC_SW_64_HINT,
+ AXP_OPERAND_RELATIVE | AXP_OPERAND_DEFAULT_ZERO | AXP_OPERAND_NOOVERFLOW,
+ insert_jhint, extract_jhint},
+
+/* The optional hint to RET/JSR_COROUTINE. */
+#define RETHINT (JMPHINT + 1)
+ {16, 0, -RETHINT, AXP_OPERAND_UNSIGNED | AXP_OPERAND_DEFAULT_ZERO, 0, 0},
+
+#define SW6HWDISP (RETHINT + 1)
+ {12, 0, -SW6HWDISP, AXP_OPERAND_SIGNED, 0, 0},
+
+/* The 16-bit combined index/scoreboard mask for the sw6
+ hw_m[ft]pr (pal19/pal1d) insns. */
+#define SW6HWINDEX (SW6HWDISP + 1)
+ {16, 0, -SW6HWINDEX, AXP_OPERAND_UNSIGNED, 0, 0},
+
+/* The 13-bit branch hint for the sw6 hw_jmp/jsr (pal1e) insn. */
+#define SW6HWJMPHINT (SW6HWINDEX + 1)
+ {8, 0, -SW6HWJMPHINT,
+ AXP_OPERAND_RELATIVE | AXP_OPERAND_DEFAULT_ZERO | AXP_OPERAND_NOOVERFLOW,
+ insert_sw6hwjhint, extract_sw6hwjhint},
+
+/* For the third operand of ternary-operand integer insns. */
+#define R3 (SW6HWJMPHINT + 1)
+ {5, 5, 0, AXP_OPERAND_IR, 0, 0},
+
+/* The plain fp register fields. */
+#define F3 (R3 + 1)
+ {5, 5, 0, AXP_OPERAND_FPR, 0, 0},
+
+/* sw simd settle instruction lit. */
+#define FMALIT (F3 + 1)
+ {5, 5, -FMALIT, AXP_OPERAND_UNSIGNED, 0, 0}, // V1.1
+
+/* For pal to check disp, which must be positive and less than 0x8000. */
+#define LMDISP (FMALIT + 1)
+ {15, 0, -LMDISP, AXP_OPERAND_UNSIGNED, 0, 0},
+
+#define RPIINDEX (LMDISP + 1)
+ {8, 0, -RPIINDEX, AXP_OPERAND_UNSIGNED, 0, 0},
+
+#define ATMDISP (RPIINDEX + 1)
+ {12, 0, -ATMDISP, AXP_OPERAND_SIGNED, 0, 0},
+
+#define DISP13 (ATMDISP + 1)
+ {13, 13, -DISP13, AXP_OPERAND_SIGNED, 0, 0},
+#define BDISP26 (DISP13 + 1)
+ {26, 0, BFD_RELOC_SW_64_BR26, AXP_OPERAND_RELATIVE, insert_bdisp26,
+ extract_bdisp26},
+#define DPFTH (BDISP26 + 1)
+ {5, 21, -DPFTH, AXP_OPERAND_UNSIGNED, 0, 0},
+/* Used by vshfqb. */
+#define ZC2 (DPFTH + 1)
+ {5, 5, 0, AXP_OPERAND_FAKE, insert_zc2, extract_zc2}};
+
+const unsigned sw_64_num_operands
+ = sizeof (sw_64_operands) / sizeof (*sw_64_operands);
+
+/* Macros used to form opcodes. */
+
+/* The main opcode. */
+#define OP(x) (((x) &0x3Fu) << 26)
+#define OP_MASK 0xFC000000
+
+/* Branch format instructions. */
+#define BRA_(oo) OP (oo)
+#define BRA_MASK OP_MASK
+#define BRA(oo) BRA_ (oo), BRA_MASK
+
+/* Floating point format instructions. */
+#define FP_(oo, fff) (OP (oo) | (((fff) &0xFF) << 5))
+#define FP_MASK (OP_MASK | 0x1FE0)
+#define FP(oo, fff) FP_ (oo, fff), FP_MASK
+
+#define FMA_(oo, fff) (OP (oo) | (((fff) &0x3F) << 10))
+#define FMA_MASK (OP_MASK | 0xFC00)
+#define FMA(oo, fff) FMA_ (oo, fff), FMA_MASK
+
+/* Memory format instructions. */
+#define MEM_(oo) OP (oo)
+#define MEM_MASK OP_MASK
+#define MEM(oo) MEM_ (oo), MEM_MASK
+
+/* Memory/Func Code format instructions. */
+#define MFC_(oo, ffff) (OP (oo) | ((ffff) &0xFFFF))
+#define MFC_MASK (OP_MASK | 0xFFFF)
+#define MFC(oo, ffff) MFC_ (oo, ffff), MFC_MASK
+
+/* Memory/Branch format instructions. */
+#define MBR_(oo, h) (OP (oo) | (((h) &3) << 14))
+#define MBR_MASK (OP_MASK | 0xC000)
+#define MBR(oo, h) MBR_ (oo, h), MBR_MASK
+
+/* Operate format instructions. The OPRL variant specifies a
+ literal second argument. */
+#define OPR_(oo, ff) (OP (oo) | (((ff) &0xFF) << 5))
+#define OPRL_(oo, ff) (OPR_ ((oo), (ff)))
+#define OPR_MASK (OP_MASK | 0x1FE0)
+#define OPR(oo, ff) OPR_ (oo, ff), OPR_MASK
+#define OPRL(oo, ff) OPRL_ (oo, ff), OPR_MASK
+
+/* sw ternary-operand Operate format instructions. */
+#define TOPR_(oo, ff) (OP (oo) | (((ff) &0x07) << 10))
+#define TOPRL_(oo, ff) (TOPR_ ((oo), (ff)))
+#define TOPR_MASK (OP_MASK | 0x1C00)
+#define TOPR(oo, ff) TOPR_ (oo, ff), TOPR_MASK
+#define TOPRL(oo, ff) TOPRL_ (oo, ff), TOPR_MASK
+
+/* sw atom instructions. */
+#define ATMEM_(oo, h) (OP (oo) | (((h) &0xF) << 12))
+#define ATMEM_MASK (OP_MASK | 0xF000)
+#define ATMEM(oo, h) ATMEM_ (oo, h), ATMEM_MASK
+
+/* sw privilege instructions. */
+#define PRIRET_(oo, h) (OP (oo) | (((h) &0x1) << 20))
+#define PRIRET_MASK (OP_MASK | 0x100000)
+#define PRIRET(oo, h) PRIRET_ (oo, h), PRIRET_MASK
+
+/* sw rpi_rcsr,rpi_wcsr. */
+#define CSR_(oo, ff) (OP (oo) | (((ff) &0xFF) << 8))
+#define CSR_MASK (OP_MASK | 0xFF00)
+#define CSR(oo, ff) CSR_ (oo, ff), CSR_MASK
+
+/* Generic PALcode format instructions. */
+#define PCD_(oo, ff) (OP (oo) | (ff << 25))
+#define PCD_MASK OP_MASK
+#define PCD(oo, ff) PCD_ (oo, ff), PCD_MASK
+
+/* Specific PALcode instructions. */
+#define SPCD_(oo, ffff) (OP (oo) | ((ffff) &0x3FFFFFF))
+#define SPCD_MASK 0xFFFFFFFF
+#define SPCD(oo, ffff) SPCD_ (oo, ffff), SPCD_MASK
+
+/* Hardware memory (hw_{ld,st}) instructions. */
+#define SW6HWMEM_(oo, f) (OP (oo) | (((f) &0xF) << 12))
+#define SW6HWMEM_MASK (OP_MASK | 0xF000)
+#define SW6HWMEM(oo, f) SW6HWMEM_ (oo, f), SW6HWMEM_MASK
+
+#define SW6HWMBR_(oo, h) (OP (oo) | (((h) &7) << 13))
+#define SW6HWMBR_MASK (OP_MASK | 0xE000)
+#define SW6HWMBR(oo, h) SW6HWMBR_ (oo, h), SW6HWMBR_MASK
+
+#define LOGX_(oo, ff) (OP (oo) | (((ff) &0x3F) << 10))
+#define LOGX_MASK (0xF0000000)
+#define LOGX(oo, ff) LOGX_ (oo, ff), LOGX_MASK
+
+#define PSE_LOGX_(oo, ff) \
+ (OP (oo) | (((ff) &0x3F) << 10) | (((ff) >> 0x6) << 26) | 0x3E0)
+#define PSE_LOGX(oo, ff) PSE_LOGX_ (oo, ff), LOGX_MASK
+
+/* Abbreviations for instruction subsets. */
+#define CORE3 AXP_OPCODE_SW6
+#define CORE3A AXP_OPCODE_SW6A
+#define CORE3B AXP_OPCODE_SW6B
+#define CORE4A AXP_OPCODE_SW8A | AXP_OPCODE_SW4E
+/* Common combinations of arguments. */
+#define ARG_NONE \
+ { \
+ 0 \
+ }
+#define ARG_BRA \
+ { \
+ RA, BDISP \
+ }
+#define ARG_FBRA \
+ { \
+ FA, BDISP \
+ }
+#define ARG_FP \
+ { \
+ FA, FB, DFC1 \
+ }
+#define ARG_FPZ1 \
+ { \
+ ZA, FB, DFC1 \
+ }
+#define ARG_MEM \
+ { \
+ RA, MDISP, PRB \
+ }
+#define ARG_FMEM \
+ { \
+ FA, MDISP, PRB \
+ }
+#define ARG_OPR \
+ { \
+ RA, RB, DRC1 \
+ }
+
+#define ARG_OPRCAS \
+ { \
+ RA, RB, RC \
+ }
+
+#define ARG_OPRL \
+ { \
+ RA, LIT, DRC1 \
+ }
+#define ARG_OPRZ1 \
+ { \
+ ZA, RB, DRC1 \
+ }
+#define ARG_OPRLZ1 \
+ { \
+ ZA, LIT, RC \
+ }
+#define ARG_PCD \
+ { \
+ PALFN \
+ }
+#define ARG_SW6HWMEM \
+ { \
+ RA, SW6HWDISP, PRB \
+ }
+
+#define ARG_FPL \
+ { \
+ FA, LIT, DFC1 \
+ }
+#define ARG_FMA \
+ { \
+ FA, FB, F3, DFC1 \
+ }
+#define ARG_PREFETCH \
+ { \
+ ZA, MDISP, PRB \
+ }
+#define ARG_FCMOV \
+ { \
+ FA, FB, F3, DFC3 \
+ }
+#define ARG_TOPR \
+ { \
+ RA, RB, R3, DRC3 \
+ }
+#define ARG_TOPRL \
+ { \
+ RA, LIT, R3, DRC3 \
+ }
+
+/* For cmov** instructions. */
+#define ARG_TOPC \
+ { \
+ RA, RB, R3, RDC \
+ }
+#define ARG_TOPCL \
+ { \
+ RA, LIT, R3, RDC \
+ }
+#define ARG_TOPFC \
+ { \
+ FA, FB, F3, RDC \
+ }
+#define ARG_TOPFCL \
+ { \
+ FA, LIT, F3, RDC \
+ }
+
+/* sw settle instruction. */
+#define ARG_FMAL \
+ { \
+ FA, FB, FMALIT, DFC1 \
+ }
+/* sw atom instruction. */
+#define ARG_ATMEM \
+ { \
+ RA, ATMDISP, PRB \
+ }
+
+#define ARG_VUAMEM \
+ { \
+ FA, ATMDISP, PRB \
+ }
+#define ARG_OPRLZ3 \
+ { \
+ RA, LIT, ZC \
+ }
+
+#define ARG_DISP13 \
+ { \
+ DISP13, RC \
+ }
+
+/* The opcode table.
+
+ The format of the opcode table is:
+
+ NAME OPCODE MASK { OPERANDS }
+
+ NAME is the name of the instruction.
+
+ OPCODE is the instruction opcode.
+
+ MASK is the opcode mask; this is used to tell the disassembler
+ which bits in the actual opcode must match OPCODE.
+
+ OPERANDS is the list of operands.
+
+ The preceding macros merge the text of the OPCODE and MASK fields.
+
+ The disassembler reads the table in order and prints the first
+ instruction which matches, so this table is sorted to put more
+ specific instructions before more general instructions.
+
+ Otherwise, it is sorted by major opcode and minor function code.
+
+ There are three classes of not-really-instructions in this table:
+
+ ALIAS is another name for another instruction. Some of
+ these come from the Architecture Handbook, some
+ come from the original gas opcode tables. In all
+ cases, the functionality of the opcode is unchanged.
+
+ PSEUDO a stylized code form endorsed by Chapter A.4 of the
+ Architecture Handbook.
+
+ EXTRA a stylized code form found in the original gas tables.
+ */
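+
+/* Example reading of one entry (illustrative): OPR (0x10, 0x00) expands
+ to opcode 0x40000000 with mask 0xFC001FE0, so an entry such as
+ {"addw", OPR (0x10, 0x00), CORE3, ARG_OPR} matches any word whose
+ major opcode is 0x10 and whose function field is 0x00, and its
+ operands print the RA, RB and RC register fields. */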
+
+const struct sw_64_opcode sw_64_opcodes[] = {
+ {"sys_call/b", PCD (0x00, 0x00), CORE3, ARG_PCD},
+ {"sys_call", PCD (0x00, 0x01), CORE3, ARG_PCD},
+ {"draina", SPCD (0x00, 0x0002), CORE3, ARG_NONE},
+ {"bpt", SPCD (0x00, 0x0080), CORE3, ARG_NONE},
+ {"bugchk", SPCD (0x00, 0x0081), CORE3, ARG_NONE},
+ {"callsys", SPCD (0x00, 0x0083), CORE3, ARG_NONE},
+ {"chmk", SPCD (0x00, 0x0083), CORE3, ARG_NONE},
+ {"imb", SPCD (0x00, 0x0086), CORE3, ARG_NONE},
+ {"rduniq", SPCD (0x00, 0x009e), CORE3, ARG_NONE},
+ {"wruniq", SPCD (0x00, 0x009f), CORE3, ARG_NONE},
+ {"gentrap", SPCD (0x00, 0x00aa), CORE3, ARG_NONE},
+ {"call", MEM (0x01), CORE3, {RA, CPRB, JMPHINT}},
+ {"ret", MEM (0x02), CORE3, {RA, CPRB, RETHINT}},
+ {"ret",
+ MEM_ (0x02) | (31 << 21) | (26 << 16) | 1,
+ 0xFFFFFFFF,
+ CORE3,
+ {0}}, /*pseudo*/
+ {"jmp", MEM (0x03), CORE3, {RA, CPRB, JMPHINT}},
+ {"br", BRA (0x04), CORE3, {ZA, BDISP}}, /* pseudo */
+ {"br", BRA (0x04), CORE3, ARG_BRA},
+ {"bsr", BRA (0x05), CORE3, ARG_BRA},
+ {"memb", MFC (0x06, 0x0000), CORE3, ARG_NONE},
+ {"imemb", MFC (0x06, 0x0001), CORE3, ARG_NONE},
+ {"wmemb", MFC (0x06, 0x0002), CORE4A, ARG_NONE},
+ {"rtc", MFC (0x06, 0x0020), CORE3, {RA, ZB}},
+ {"rtc", MFC (0x06, 0x0020), CORE3, {RA, RB}},
+ {"rcid", MFC (0x06, 0x0040), CORE3, {RA, ZB}},
+ {"halt", MFC (0x06, 0x0080), CORE3, {ZA, ZB}},
+ {"rd_f", MFC (0x06, 0x1000), CORE3, {RA, ZB}},
+ {"wr_f", MFC (0x06, 0x1020), CORE3, {RA, ZB}},
+ {"rtid", MFC (0x06, 0x1040), CORE3, {RA}},
+ {"pri_rcsr", CSR (0x06, 0xFE), CORE3, {RA, RPIINDEX, ZB}},
+ {"pri_wcsr", CSR (0x06, 0xFF), CORE3, {RA, RPIINDEX, ZB}},
+ {"csrws", CSR (0x06, 0xFC), CORE4A, {RA, RPIINDEX, ZB}},
+ {"csrwc", CSR (0x06, 0xFD), CORE4A, {RA, RPIINDEX, ZB}},
+ {"csrr", CSR (0x06, 0xFE), CORE4A, {RA, RPIINDEX, ZB}},
+ {"csrw", CSR (0x06, 0xFF), CORE4A, {RA, RPIINDEX, ZB}},
+ {"pri_ret", PRIRET (0x07, 0x0), CORE3, {RA}},
+ {"pri_ret/b", PRIRET (0x07, 0x1), CORE3, {RA}},
+ {"lldw", ATMEM (0x08, 0x0), CORE3, ARG_ATMEM},
+ {"lldl", ATMEM (0x08, 0x1), CORE3, ARG_ATMEM},
+ {"ldw_inc", ATMEM (0x08, 0x2), CORE3, ARG_ATMEM},
+ {"ldl_inc", ATMEM (0x08, 0x3), CORE3, ARG_ATMEM},
+ {"ldw_dec", ATMEM (0x08, 0x4), CORE3, ARG_ATMEM},
+ {"ldl_dec", ATMEM (0x08, 0x5), CORE3, ARG_ATMEM},
+ {"ldw_set", ATMEM (0x08, 0x6), CORE3, ARG_ATMEM},
+ {"ldl_set", ATMEM (0x08, 0x7), CORE3, ARG_ATMEM},
+ {"lstw", ATMEM (0x08, 0x8), CORE3, ARG_ATMEM},
+ {"lstl", ATMEM (0x08, 0x9), CORE3, ARG_ATMEM},
+ {"ldw_nc", ATMEM (0x08, 0xA), CORE3, ARG_ATMEM},
+ {"ldl_nc", ATMEM (0x08, 0xB), CORE3, ARG_ATMEM},
+ {"ldd_nc", ATMEM (0x08, 0xC), CORE3, ARG_VUAMEM},
+ {"stw_nc", ATMEM (0x08, 0xD), CORE3, ARG_ATMEM},
+ {"stl_nc", ATMEM (0x08, 0xE), CORE3, ARG_ATMEM},
+ {"std_nc", ATMEM (0x08, 0xF), CORE3, ARG_VUAMEM},
+ {"fillcs", MEM (0x09), CORE3, ARG_PREFETCH},
+ {"ldwe", MEM (0x09), CORE3, ARG_FMEM}, // CORE3 v0.2a
+ {"e_fillcs", MEM (0x0A), CORE3, ARG_PREFETCH},
+ {"ldse", MEM (0x0A), CORE3, ARG_FMEM},
+ {"lds4e", MEM (0x0A), CORE3, ARG_FMEM}, /* pseudo CORE3 SIMD */
+ {"fillcs_e", MEM (0x0B), CORE3, ARG_PREFETCH},
+ {"ldde", MEM (0x0B), CORE3, ARG_FMEM},
+ {"ldd4e", MEM (0x0B), CORE3, ARG_FMEM}, /* pseudo CORE3 SIMD */
+ {"e_fillde", MEM (0x0C), CORE3, ARG_PREFETCH},
+ {"vlds", MEM (0x0C), CORE3, ARG_FMEM},
+ {"v4lds", MEM (0x0C), CORE3, ARG_FMEM},
+ {"vldd", MEM (0x0D), CORE3, ARG_FMEM},
+ {"v4ldd", MEM (0x0D), CORE3, ARG_FMEM},
+ {"vsts", MEM (0x0E), CORE3, ARG_FMEM},
+ {"v4sts", MEM (0x0E), CORE3, ARG_FMEM},
+ {"vstd", MEM (0x0F), CORE3, ARG_FMEM},
+ {"v4std", MEM (0x0F), CORE3, ARG_FMEM},
+ {"addw", OPR (0x10, 0x00), CORE3, ARG_OPR},
+ {"addw", OPRL (0x12, 0x00), CORE3, ARG_OPRL},
+ {"sextl", OPR (0x10, 0x00), CORE3, ARG_OPRZ1}, /* pseudo */
+ {"sextl", OPRL (0x12, 0x00), CORE3, ARG_OPRLZ1}, /* pseudo */
+ {"subw", OPR (0x10, 0x01), CORE3, ARG_OPR},
+ {"subw", OPRL (0x12, 0x01), CORE3, ARG_OPRL},
+ {"negw", OPR (0x10, 0x01), CORE3, ARG_OPRZ1}, /* pseudo swgcc */
+ {"negw", OPRL (0x12, 0x01), CORE3, ARG_OPRLZ1}, /* pseudo swgcc */
+ {"s4addw", OPR (0x10, 0x02), CORE3, ARG_OPR},
+ {"s4addw", OPRL (0x12, 0x02), CORE3, ARG_OPRL},
+ {"s4subw", OPR (0x10, 0x03), CORE3, ARG_OPR},
+ {"s4subw", OPRL (0x12, 0x03), CORE3, ARG_OPRL},
+ {"s8addw", OPR (0x10, 0x04), CORE3, ARG_OPR},
+ {"s8addw", OPRL (0x12, 0x04), CORE3, ARG_OPRL},
+ {"s8subw", OPR (0x10, 0x05), CORE3, ARG_OPR},
+ {"s8subw", OPRL (0x12, 0x05), CORE3, ARG_OPRL},
+ {"addl", OPR (0x10, 0x08), CORE3, ARG_OPR},
+ {"addl", OPRL (0x12, 0x08), CORE3, ARG_OPRL},
+ {"subl", OPR (0x10, 0x09), CORE3, ARG_OPR},
+ {"subl", OPRL (0x12, 0x09), CORE3, ARG_OPRL},
+ {"negl", OPR (0x10, 0x09), CORE3, ARG_OPRZ1}, /* pseudo swgcc */
+ {"negl", OPRL (0x12, 0x09), CORE3, ARG_OPRLZ1}, /* pseudo swgcc */
+ {"neglv", OPR (0x10, 0x09), CORE3, ARG_OPRZ1}, /* pseudo swgcc */
+ {"neglv", OPRL (0x12, 0x09), CORE3, ARG_OPRLZ1}, /* pseudo swgcc */
+ {"s4addl", OPR (0x10, 0x0A), CORE3, ARG_OPR},
+ {"s4addl", OPRL (0x12, 0x0A), CORE3, ARG_OPRL},
+ {"s4subl", OPR (0x10, 0x0B), CORE3, ARG_OPR},
+ {"s4subl", OPRL (0x12, 0x0B), CORE3, ARG_OPRL},
+ {"s8addl", OPR (0x10, 0x0C), CORE3, ARG_OPR},
+ {"s8addl", OPRL (0x12, 0x0C), CORE3, ARG_OPRL},
+ {"s8subl", OPR (0x10, 0x0D), CORE3, ARG_OPR},
+ {"s8subl", OPRL (0x12, 0x0D), CORE3, ARG_OPRL},
+ {"mulw", OPR (0x10, 0x10), CORE3, ARG_OPR},
+ {"mulw", OPRL (0x12, 0x10), CORE3, ARG_OPRL},
+ {"divw", OPR (0x10, 0x11), CORE4A, ARG_OPR},
+ {"udivw", OPR (0x10, 0x12), CORE4A, ARG_OPR},
+ {"remw", OPR (0x10, 0x13), CORE4A, ARG_OPR},
+ {"uremw", OPR (0x10, 0x14), CORE4A, ARG_OPR},
+ {"mull", OPR (0x10, 0x18), CORE3, ARG_OPR},
+ {"mull", OPRL (0x12, 0x18), CORE3, ARG_OPRL},
+ {"umulh", OPR (0x10, 0x19), CORE3, ARG_OPR},
+ {"umulh", OPRL (0x12, 0x19), CORE3, ARG_OPRL},
+ {"divl", OPR (0x10, 0x1A), CORE4A, ARG_OPR},
+ {"udivl", OPR (0x10, 0x1B), CORE4A, ARG_OPR},
+ {"reml", OPR (0x10, 0x1C), CORE4A, ARG_OPR},
+ {"ureml", OPR (0x10, 0x1D), CORE4A, ARG_OPR},
+ {"addpi", OPR (0x10, 0x1E), CORE4A, ARG_DISP13},
+ {"addpis", OPR (0x10, 0x1F), CORE4A, ARG_DISP13},
+ {"crc32b", OPR (0x10, 0x20), CORE4A, ARG_OPR},
+ {"crc32h", OPR (0x10, 0x21), CORE4A, ARG_OPR},
+ {"crc32w", OPR (0x10, 0x22), CORE4A, ARG_OPR},
+ {"crc32l", OPR (0x10, 0x23), CORE4A, ARG_OPR},
+ {"crc32cb", OPR (0x10, 0x24), CORE4A, ARG_OPR},
+ {"crc32ch", OPR (0x10, 0x25), CORE4A, ARG_OPR},
+ {"crc32cw", OPR (0x10, 0x26), CORE4A, ARG_OPR},
+ {"crc32cl", OPR (0x10, 0x27), CORE4A, ARG_OPR},
+ {"cmpeq", OPR (0x10, 0x28), CORE3, ARG_OPR},
+ {"cmpeq", OPRL (0x12, 0x28), CORE3, ARG_OPRL},
+ {"cmplt", OPR (0x10, 0x29), CORE3, ARG_OPR},
+ {"cmplt", OPRL (0x12, 0x29), CORE3, ARG_OPRL},
+ {"cmple", OPR (0x10, 0x2A), CORE3, ARG_OPR},
+ {"cmple", OPRL (0x12, 0x2A), CORE3, ARG_OPRL},
+ {"cmpult", OPR (0x10, 0x2B), CORE3, ARG_OPR},
+ {"cmpult", OPRL (0x12, 0x2B), CORE3, ARG_OPRL},
+ {"cmpule", OPR (0x10, 0x2C), CORE3, ARG_OPR},
+ {"cmpule", OPRL (0x12, 0x2C), CORE3, ARG_OPRL},
+ {"sbt", OPR (0x10, 0x2D), CORE4A, ARG_OPR},
+ {"sbt", OPRL (0x12, 0x2D), CORE4A, ARG_OPRL},
+ {"cbt", OPR (0x10, 0x2E), CORE4A, ARG_OPR},
+ {"cbt", OPRL (0x12, 0x2E), CORE4A, ARG_OPRL},
+ {"and", OPR (0x10, 0x38), CORE3, ARG_OPR},
+ {"and", OPRL (0x12, 0x38), CORE3, ARG_OPRL},
+ {"bic", OPR (0x10, 0x39), CORE3, ARG_OPR},
+ {"bic", OPRL (0x12, 0x39), CORE3, ARG_OPRL},
+ {"andnot", OPR (0x10, 0x39), CORE3, ARG_OPR}, /* pseudo */
+ {"andnot", OPRL (0x12, 0x39), CORE3, ARG_OPRL}, /* pseudo */
+ {"nop",
+ OPR (0x10, 0x3A),
+ CORE3,
+ {ZA, ZB, ZC}}, /* Now unop has a new expression. */
+ {"excb", OPR (0x10, 0x3A), CORE3, {ZA, ZB, ZC}}, /* pseudo */
+ {"clr", OPR (0x10, 0x3A), CORE3, {ZA, ZB, RC}}, /* pseudo swgcc */
+ {"mov", OPR (0x10, 0x3A), CORE3, {ZA, RB, RC}}, /* pseudo */
+ {"mov", OPRL (0x12, 0x3A), CORE3, {ZA, LIT, RC}}, /* pseudo */
+ {"implver",
+ OPRL_ (0x12, 0x3A) | 2 << 13,
+ 0xFFFFFFE0,
+ CORE3,
+ {ZA, RC}}, /* pseudo swgcc */
+ {"amask",
+ OPR_ (0x10, 0x3A) | 31 << 16,
+ OPR_MASK,
+ CORE3,
+ {ZA, RB, RC}}, /* pseudo */
+ {"amask", OPRL (0x12, 0x3A), CORE3, {ZA, LIT, RC}}, /* pseudo */
+ {"or", OPR (0x10, 0x3A), CORE3, ARG_OPR},
+ {"or", OPRL (0x12, 0x3A), CORE3, ARG_OPRL},
+ {"bis", OPR (0x10, 0x3A), CORE3, ARG_OPR},
+ {"bis", OPRL (0x12, 0x3A), CORE3, ARG_OPRL},
+ {"not", OPR (0x10, 0x3B), CORE3, ARG_OPRZ1}, /* pseudo swgcc */
+ {"not", OPRL (0x12, 0x3B), CORE3, ARG_OPRLZ1}, /* pseudo swgcc */
+ {"ornot", OPR (0x10, 0x3B), CORE3, ARG_OPR},
+ {"ornot", OPRL (0x12, 0x3B), CORE3, ARG_OPRL},
+ {"xor", OPR (0x10, 0x3C), CORE3, ARG_OPR},
+ {"xor", OPRL (0x12, 0x3C), CORE3, ARG_OPRL},
+ {"eqv", OPR (0x10, 0x3D), CORE3, ARG_OPR},
+ {"eqv", OPRL (0x12, 0x3D), CORE3, ARG_OPRL},
+ {"xornot", OPR (0x10, 0x3D), CORE3, ARG_OPR}, /* pseudo swgcc */
+ {"xornot", OPRL (0x12, 0x3D), CORE3, ARG_OPRL}, /* pseudo swgcc */
+ {"inslb", OPR (0x10, 0x40), CORE3, ARG_OPR},
+ {"inslb", OPRL (0x12, 0x40), CORE3, ARG_OPRL},
+ {"ins0b", OPR (0x10, 0x40), CORE3, ARG_OPR},
+ {"ins0b", OPRL (0x12, 0x40), CORE3, ARG_OPRL},
+ {"inslh", OPR (0x10, 0x41), CORE3, ARG_OPR},
+ {"inslh", OPRL (0x12, 0x41), CORE3, ARG_OPRL},
+ {"ins1b", OPR (0x10, 0x41), CORE3, ARG_OPR},
+ {"ins1b", OPRL (0x12, 0x41), CORE3, ARG_OPRL},
+ {"inslw", OPR (0x10, 0x42), CORE3, ARG_OPR},
+ {"inslw", OPRL (0x12, 0x42), CORE3, ARG_OPRL},
+ {"ins2b", OPR (0x10, 0x42), CORE3, ARG_OPR},
+ {"ins2b", OPRL (0x12, 0x42), CORE3, ARG_OPRL},
+ {"insll", OPR (0x10, 0x43), CORE3, ARG_OPR},
+ {"insll", OPRL (0x12, 0x43), CORE3, ARG_OPRL},
+ {"ins3b", OPR (0x10, 0x43), CORE3, ARG_OPR},
+ {"ins3b", OPRL (0x12, 0x43), CORE3, ARG_OPRL},
+ {"inshb", OPR (0x10, 0x44), CORE3, ARG_OPR},
+ {"inshb", OPRL (0x12, 0x44), CORE3, ARG_OPRL},
+ {"ins4b", OPR (0x10, 0x44), CORE3, ARG_OPR},
+ {"ins4b", OPRL (0x12, 0x44), CORE3, ARG_OPRL},
+ {"inshh", OPR (0x10, 0x45), CORE3, ARG_OPR},
+ {"inshh", OPRL (0x12, 0x45), CORE3, ARG_OPRL},
+ {"ins5b", OPR (0x10, 0x45), CORE3, ARG_OPR},
+ {"ins5b", OPRL (0x12, 0x45), CORE3, ARG_OPRL},
+ {"inshw", OPR (0x10, 0x46), CORE3, ARG_OPR},
+ {"inshw", OPRL (0x12, 0x46), CORE3, ARG_OPRL},
+ {"ins6b", OPR (0x10, 0x46), CORE3, ARG_OPR},
+ {"ins6b", OPRL (0x12, 0x46), CORE3, ARG_OPRL},
+ {"inshl", OPR (0x10, 0x47), CORE3, ARG_OPR},
+ {"inshl", OPRL (0x12, 0x47), CORE3, ARG_OPRL},
+ {"ins7b", OPR (0x10, 0x47), CORE3, ARG_OPR},
+ {"ins7b", OPRL (0x12, 0x47), CORE3, ARG_OPRL},
+ {"slll", OPR (0x10, 0x48), CORE4A, ARG_OPR},
+ {"slll", OPRL (0x12, 0x48), CORE4A, ARG_OPRL},
+ {"srll", OPR (0x10, 0x49), CORE4A, ARG_OPR},
+ {"srll", OPRL (0x12, 0x49), CORE4A, ARG_OPRL},
+ {"sral", OPR (0x10, 0x4A), CORE4A, ARG_OPR},
+ {"sral", OPRL (0x12, 0x4A), CORE4A, ARG_OPRL},
+ {"roll", OPR (0x10, 0x4B), CORE4A, ARG_OPR},
+ {"roll", OPRL (0x12, 0x4B), CORE4A, ARG_OPRL},
+ {"sllw", OPR (0x10, 0x4C), CORE4A, ARG_OPR},
+ {"sllw", OPRL (0x12, 0x4C), CORE4A, ARG_OPRL},
+ {"srlw", OPR (0x10, 0x4D), CORE4A, ARG_OPR},
+ {"srlw", OPRL (0x12, 0x4D), CORE4A, ARG_OPRL},
+ {"sraw", OPR (0x10, 0x4E), CORE4A, ARG_OPR},
+ {"sraw", OPRL (0x12, 0x4E), CORE4A, ARG_OPRL},
+ {"rolw", OPR (0x10, 0x4F), CORE4A, ARG_OPR},
+ {"rolw", OPRL (0x12, 0x4F), CORE4A, ARG_OPRL},
+ {"sll", OPR (0x10, 0x48), CORE3, ARG_OPR},
+ {"sll", OPRL (0x12, 0x48), CORE3, ARG_OPRL},
+ {"srl", OPR (0x10, 0x49), CORE3, ARG_OPR},
+ {"srl", OPRL (0x12, 0x49), CORE3, ARG_OPRL},
+ {"sra", OPR (0x10, 0x4A), CORE3, ARG_OPR},
+ {"sra", OPRL (0x12, 0x4A), CORE3, ARG_OPRL},
+ {"sllw2", OPR (0x10, 0x4C), CORE3, ARG_OPR}, // CORE3 v0.2a
+ {"sllw2", OPRL (0x12, 0x4C), CORE3, ARG_OPRL}, // CORE3 v0.2a
+ {"srlw2", OPR (0x10, 0x4D), CORE3, ARG_OPR}, // CORE3 v0.2a
+ {"srlw2", OPRL (0x12, 0x4D), CORE3, ARG_OPRL}, // CORE3 v0.2a
+ {"sraw2", OPR (0x10, 0x4E), CORE3, ARG_OPR}, // CORE3 v0.2a
+ {"sraw2", OPRL (0x12, 0x4E), CORE3, ARG_OPRL}, // CORE3 v0.2a
+ {"extlb", OPR (0x10, 0x50), CORE3, ARG_OPR},
+ {"extlb", OPRL (0x12, 0x50), CORE3, ARG_OPRL},
+ {"ext0b", OPR (0x10, 0x50), CORE3, ARG_OPR},
+ {"ext0b", OPRL (0x12, 0x50), CORE3, ARG_OPRL},
+ {"extlh", OPR (0x10, 0x51), CORE3, ARG_OPR},
+ {"extlh", OPRL (0x12, 0x51), CORE3, ARG_OPRL},
+ {"ext1b", OPR (0x10, 0x51), CORE3, ARG_OPR},
+ {"ext1b", OPRL (0x12, 0x51), CORE3, ARG_OPRL},
+ {"extlw", OPR (0x10, 0x52), CORE3, ARG_OPR},
+ {"extlw", OPRL (0x12, 0x52), CORE3, ARG_OPRL},
+ {"ext2b", OPR (0x10, 0x52), CORE3, ARG_OPR},
+ {"ext2b", OPRL (0x12, 0x52), CORE3, ARG_OPRL},
+ {"extll", OPR (0x10, 0x53), CORE3, ARG_OPR},
+ {"extll", OPRL (0x12, 0x53), CORE3, ARG_OPRL},
+ {"ext3b", OPR (0x10, 0x53), CORE3, ARG_OPR},
+ {"ext3b", OPRL (0x12, 0x53), CORE3, ARG_OPRL},
+ {"exthb", OPR (0x10, 0x54), CORE3, ARG_OPR},
+ {"exthb", OPRL (0x12, 0x54), CORE3, ARG_OPRL},
+ {"ext4b", OPR (0x10, 0x54), CORE3, ARG_OPR},
+ {"ext4b", OPRL (0x12, 0x54), CORE3, ARG_OPRL},
+ {"exthh", OPR (0x10, 0x55), CORE3, ARG_OPR},
+ {"exthh", OPRL (0x12, 0x55), CORE3, ARG_OPRL},
+ {"ext5b", OPR (0x10, 0x55), CORE3, ARG_OPR},
+ {"ext5b", OPRL (0x12, 0x55), CORE3, ARG_OPRL},
+ {"exthw", OPR (0x10, 0x56), CORE3, ARG_OPR},
+ {"exthw", OPRL (0x12, 0x56), CORE3, ARG_OPRL},
+ {"ext6b", OPR (0x10, 0x56), CORE3, ARG_OPR},
+ {"ext6b", OPRL (0x12, 0x56), CORE3, ARG_OPRL},
+ {"exthl", OPR (0x10, 0x57), CORE3, ARG_OPR},
+ {"exthl", OPRL (0x12, 0x57), CORE3, ARG_OPRL},
+ {"ext7b", OPR (0x10, 0x57), CORE3, ARG_OPR},
+ {"ext7b", OPRL (0x12, 0x57), CORE3, ARG_OPRL},
+ {"ctpop", OPR (0x10, 0x58), CORE3, ARG_OPRZ1},
+ {"ctlz", OPR (0x10, 0x59), CORE3, ARG_OPRZ1},
+ {"cttz", OPR (0x10, 0x5A), CORE3, ARG_OPRZ1},
+ {"revbh", OPR (0x10, 0x5B), CORE4A, ARG_OPRZ1},
+ {"revbw", OPR (0x10, 0x5C), CORE4A, ARG_OPRZ1},
+ {"revbl", OPR (0x10, 0x5D), CORE4A, ARG_OPRZ1},
+ {"casw", OPR (0x10, 0x5E), CORE4A, ARG_OPRCAS},
+ {"casl", OPR (0x10, 0x5F), CORE4A, ARG_OPRCAS},
+ {"masklb", OPR (0x10, 0x60), CORE3, ARG_OPR},
+ {"masklb", OPRL (0x12, 0x60), CORE3, ARG_OPRL},
+ {"mask0b", OPR (0x10, 0x60), CORE3, ARG_OPR},
+ {"mask0b", OPRL (0x12, 0x60), CORE3, ARG_OPRL},
+ {"masklh", OPR (0x10, 0x61), CORE3, ARG_OPR},
+ {"masklh", OPRL (0x12, 0x61), CORE3, ARG_OPRL},
+ {"mask1b", OPR (0x10, 0x61), CORE3, ARG_OPR},
+ {"mask1b", OPRL (0x12, 0x61), CORE3, ARG_OPRL},
+ {"masklw", OPR (0x10, 0x62), CORE3, ARG_OPR},
+ {"masklw", OPRL (0x12, 0x62), CORE3, ARG_OPRL},
+ {"mask2b", OPR (0x10, 0x62), CORE3, ARG_OPR},
+ {"mask2b", OPRL (0x12, 0x62), CORE3, ARG_OPRL},
+ {"maskll", OPR (0x10, 0x63), CORE3, ARG_OPR},
+ {"maskll", OPRL (0x12, 0x63), CORE3, ARG_OPRL},
+ {"mask3b", OPR (0x10, 0x63), CORE3, ARG_OPR},
+ {"mask3b", OPRL (0x12, 0x63), CORE3, ARG_OPRL},
+ {"maskhb", OPR (0x10, 0x64), CORE3, ARG_OPR},
+ {"maskhb", OPRL (0x12, 0x64), CORE3, ARG_OPRL},
+ {"mask4b", OPR (0x10, 0x64), CORE3, ARG_OPR},
+ {"mask4b", OPRL (0x12, 0x64), CORE3, ARG_OPRL},
+ {"maskhh", OPR (0x10, 0x65), CORE3, ARG_OPR},
+ {"maskhh", OPRL (0x12, 0x65), CORE3, ARG_OPRL},
+ {"mask5b", OPR (0x10, 0x65), CORE3, ARG_OPR},
+ {"mask5b", OPRL (0x12, 0x65), CORE3, ARG_OPRL},
+ {"maskhw", OPR (0x10, 0x66), CORE3, ARG_OPR},
+ {"maskhw", OPRL (0x12, 0x66), CORE3, ARG_OPRL},
+ {"mask6b", OPR (0x10, 0x66), CORE3, ARG_OPR},
+ {"mask6b", OPRL (0x12, 0x66), CORE3, ARG_OPRL},
+ {"maskhl", OPR (0x10, 0x67), CORE3, ARG_OPR},
+ {"maskhl", OPRL (0x12, 0x67), CORE3, ARG_OPRL},
+ {"mask7b", OPR (0x10, 0x67), CORE3, ARG_OPR},
+ {"mask7b", OPRL (0x12, 0x67), CORE3, ARG_OPRL},
+ {"zap", OPR (0x10, 0x68), CORE3, ARG_OPR},
+ {"zap", OPRL (0x12, 0x68), CORE3, ARG_OPRL},
+ {"zapnot", OPR (0x10, 0x69), CORE3, ARG_OPR},
+ {"zapnot", OPRL (0x12, 0x69), CORE3, ARG_OPRL},
+ {"sextb", OPR (0x10, 0x6A), CORE3, ARG_OPRZ1},
+ {"sextb", OPRL (0x12, 0x6A), CORE3, ARG_OPRLZ1},
+ {"sexth", OPR (0x10, 0x6B), CORE3, ARG_OPRZ1},
+ {"sexth", OPRL (0x12, 0x6B), CORE3, ARG_OPRLZ1},
+ {"cmpgeb", OPR (0x10, 0x6C), CORE3, ARG_OPR},
+ {"cmpgeb", OPRL (0x12, 0x6C), CORE3, ARG_OPRL},
+ {"fimovs", OPR (0x10, 0x70), CORE3, {FA, ZB, RC}},
+ {"cmovdl", OPR (0x10, 0x72), CORE4A, {ZA, FB, RC}},
+ {"cmovdl_g", OPR (0x10, 0x74), CORE4A, {ZA, FB, RC}},
+ {"fimovd", OPR (0x10, 0x78), CORE3, {FA, ZB, RC}},
+ {"ftoid", OPR (0x10, 0x78), CORE3, {FA, ZB, RC}},
+ {"cmovdl_p", OPR (0x10, 0x7a), CORE4A, {ZA, FB, RC}},
+ {"cmovdl_z", OPR (0x10, 0x7c), CORE4A, {ZA, FB, RC}},
+ {"cmovdl_n", OPR (0x10, 0x80), CORE4A, {ZA, FB, RC}},
+ {"cmovdlu", OPR (0x10, 0x81), CORE4A, {ZA, FB, RC}},
+ {"cmovdlu_g", OPR (0x10, 0x82), CORE4A, {ZA, FB, RC}},
+ {"cmovdlu_p", OPR (0x10, 0x83), CORE4A, {ZA, FB, RC}},
+ {"cmovdlu_z", OPR (0x10, 0x84), CORE4A, {ZA, FB, RC}},
+ {"cmovdlu_n", OPR (0x10, 0x85), CORE4A, {ZA, FB, RC}},
+ {"cmovdwu", OPR (0x10, 0x86), CORE4A, {ZA, FB, RC}},
+ {"cmovdwu_g", OPR (0x10, 0x87), CORE4A, {ZA, FB, RC}},
+ {"cmovdwu_p", OPR (0x10, 0x88), CORE4A, {ZA, FB, RC}},
+ {"cmovdwu_z", OPR (0x10, 0x89), CORE4A, {ZA, FB, RC}},
+ {"cmovdwu_n", OPR (0x10, 0x8a), CORE4A, {ZA, FB, RC}},
+ {"cmovdw", OPR (0x10, 0x8b), CORE4A, {ZA, FB, RC}},
+ {"cmovdw_g", OPR (0x10, 0x8c), CORE4A, {ZA, FB, RC}},
+ {"cmovdw_p", OPR (0x10, 0x8d), CORE4A, {ZA, FB, RC}},
+ {"cmovdw_z", OPR (0x10, 0x8e), CORE4A, {ZA, FB, RC}},
+ {"cmovdw_n", OPR (0x10, 0x8f), CORE4A, {ZA, FB, RC}},
+ {"seleq", TOPR (0x11, 0x0), CORE3, ARG_TOPR},
+ {"seleq", TOPRL (0x13, 0x0), CORE3, ARG_TOPRL},
+ {"selge", TOPR (0x11, 0x1), CORE3, ARG_TOPR},
+ {"selge", TOPRL (0x13, 0x1), CORE3, ARG_TOPRL},
+ {"selgt", TOPR (0x11, 0x2), CORE3, ARG_TOPR},
+ {"selgt", TOPRL (0x13, 0x2), CORE3, ARG_TOPRL},
+ {"selle", TOPR (0x11, 0x3), CORE3, ARG_TOPR},
+ {"selle", TOPRL (0x13, 0x3), CORE3, ARG_TOPRL},
+ {"sellt", TOPR (0x11, 0x4), CORE3, ARG_TOPR},
+ {"sellt", TOPRL (0x13, 0x4), CORE3, ARG_TOPRL},
+ {"selne", TOPR (0x11, 0x5), CORE3, ARG_TOPR},
+ {"selne", TOPRL (0x13, 0x5), CORE3, ARG_TOPRL},
+ {"sellbc", TOPR (0x11, 0x6), CORE3, ARG_TOPR},
+ {"sellbc", TOPRL (0x13, 0x6), CORE3, ARG_TOPRL},
+ {"sellbs", TOPR (0x11, 0x7), CORE3, ARG_TOPR},
+ {"sellbs", TOPRL (0x13, 0x7), CORE3, ARG_TOPRL},
+ {"vlog", LOGX (0x14, 0x00), CORE3, ARG_FMA},
+
+ {"vbicw", PSE_LOGX (0x14, 0x30), CORE3, {FA, FB, DFC1}},
+ {"vxorw", PSE_LOGX (0x14, 0x3c), CORE3, {FA, FB, DFC1}},
+ {"vandw", PSE_LOGX (0x14, 0xc0), CORE3, {FA, FB, DFC1}},
+ {"veqvw", PSE_LOGX (0x14, 0xc3), CORE3, {FA, FB, DFC1}},
+ {"vornotw", PSE_LOGX (0x14, 0xf3), CORE3, {FA, FB, DFC1}},
+ {"vbisw", PSE_LOGX (0x14, 0xfc), CORE3, {FA, FB, DFC1}},
+
+ {"fadds", FP (0x18, 0x00), CORE3, ARG_FP},
+ {"faddd", FP (0x18, 0x01), CORE3, ARG_FP},
+ {"fsubs", FP (0x18, 0x02), CORE3, ARG_FP},
+ {"fsubd", FP (0x18, 0x03), CORE3, ARG_FP},
+ {"fmuls", FP (0x18, 0x04), CORE3, ARG_FP},
+ {"fmuld", FP (0x18, 0x05), CORE3, ARG_FP},
+ {"fdivs", FP (0x18, 0x06), CORE3, ARG_FP},
+ {"fdivd", FP (0x18, 0x07), CORE3, ARG_FP},
+ {"fsqrts", FP (0x18, 0x08), CORE3, ARG_FPZ1},
+ {"fsqrtd", FP (0x18, 0x09), CORE3, ARG_FPZ1},
+ {"fcmpeq", FP (0x18, 0x10), CORE3, ARG_FP},
+ {"fcmple", FP (0x18, 0x11), CORE3, ARG_FP},
+ {"fcmplt", FP (0x18, 0x12), CORE3, ARG_FP},
+ {"fcmpun", FP (0x18, 0x13), CORE3, ARG_FP},
+
+ {"fcvtsd", FP (0x18, 0x20), CORE3, ARG_FPZ1},
+ {"fcvtds", FP (0x18, 0x21), CORE3, ARG_FPZ1},
+ {"fcvtdl_g", FP (0x18, 0x22), CORE3, ARG_FPZ1},
+ {"fcvtdl_p", FP (0x18, 0x23), CORE3, ARG_FPZ1},
+ {"fcvtdl_z", FP (0x18, 0x24), CORE3, ARG_FPZ1},
+ {"fcvtdl_n", FP (0x18, 0x25), CORE3, ARG_FPZ1},
+ {"fcvtdl", FP (0x18, 0x27), CORE3, ARG_FPZ1},
+ {"fcvtwl", FP (0x18, 0x28), CORE3, ARG_FPZ1},
+ {"fcvtlw", FP (0x18, 0x29), CORE3, ARG_FPZ1},
+ {"fcvtls", FP (0x18, 0x2d), CORE3, ARG_FPZ1},
+ {"fcvths", FP (0x18, 0x2e), CORE4A, ARG_FPZ1},
+ {"fcvtld", FP (0x18, 0x2f), CORE3, ARG_FPZ1},
+
+ {"fnop", FP (0x18, 0x030), CORE3, {ZA, ZB, ZC}},
+ {"fclr", FP (0x18, 0x030), CORE3, {ZA, ZB, FC}},
+ {"fabs", FP (0x18, 0x030), CORE3, ARG_FPZ1},
+ {"fcpys", FP (0x18, 0x30), CORE3, ARG_FP},
+ {"fmov", FP (0x18, 0x30), CORE3, {FA, RBA, FC}},
+ {"fcpyse", FP (0x18, 0x31), CORE3, ARG_FP},
+ {"fneg", FP (0x18, 0x32), CORE3, {FA, RBA, FC}},
+ {"fcpysn", FP (0x18, 0x32), CORE3, ARG_FP},
+
+ {"ifmovs", FP (0x18, 0x40), CORE3, {RA, ZB, FC}},
+ {"ifmovd", FP (0x18, 0x41), CORE3, {RA, ZB, FC}},
+ {"itofd", FP (0x18, 0x41), CORE3, {RA, ZB, FC}},
+ {"cmovls", FP (0x18, 0x48), CORE4A, {ZA, RB, FC}},
+ {"cmovws", FP (0x18, 0x49), CORE4A, {ZA, RB, FC}},
+ {"cmovld", FP (0x18, 0x4a), CORE4A, {ZA, RB, FC}},
+ {"cmovwd", FP (0x18, 0x4b), CORE4A, {ZA, RB, FC}},
+ {"cmovuls", FP (0x18, 0x4c), CORE4A, {ZA, RB, FC}},
+ {"cmovuws", FP (0x18, 0x4d), CORE4A, {ZA, RB, FC}},
+ {"cmovuld", FP (0x18, 0x4e), CORE4A, {ZA, RB, FC}},
+ {"cmovuwd", FP (0x18, 0x4f), CORE4A, {ZA, RB, FC}},
+ {"rfpcr", FP (0x18, 0x50), CORE3, {FA, RBA, RCA}},
+ {"wfpcr", FP (0x18, 0x51), CORE3, {FA, RBA, RCA}},
+ {"setfpec0", FP (0x18, 0x54), CORE3, ARG_NONE},
+ {"setfpec1", FP (0x18, 0x55), CORE3, ARG_NONE},
+ {"setfpec2", FP (0x18, 0x56), CORE3, ARG_NONE},
+ {"setfpec3", FP (0x18, 0x57), CORE3, ARG_NONE},
+ {"frecs", FP (0x18, 0x58), CORE4A, {FA, ZB, DFC1}},
+ {"frecd", FP (0x18, 0x59), CORE4A, {FA, ZB, DFC1}},
+ {"fris", FP (0x18, 0x5A), CORE4A, ARG_FPZ1},
+ {"fris_g", FP (0x18, 0x5B), CORE4A, ARG_FPZ1},
+ {"fris_p", FP (0x18, 0x5C), CORE4A, ARG_FPZ1},
+ {"fris_z", FP (0x18, 0x5D), CORE4A, ARG_FPZ1},
+ {"fris_n", FP (0x18, 0x5F), CORE4A, ARG_FPZ1},
+ {"frid", FP (0x18, 0x60), CORE4A, ARG_FPZ1},
+ {"frid_g", FP (0x18, 0x61), CORE4A, ARG_FPZ1},
+ {"frid_p", FP (0x18, 0x62), CORE4A, ARG_FPZ1},
+ {"frid_z", FP (0x18, 0x63), CORE4A, ARG_FPZ1},
+ {"frid_n", FP (0x18, 0x64), CORE4A, ARG_FPZ1},
+ {"fmas", FMA (0x19, 0x00), CORE3, ARG_FMA},
+ {"fmad", FMA (0x19, 0x01), CORE3, ARG_FMA},
+ {"fmss", FMA (0x19, 0x02), CORE3, ARG_FMA},
+ {"fmsd", FMA (0x19, 0x03), CORE3, ARG_FMA},
+ {"fnmas", FMA (0x19, 0x04), CORE3, ARG_FMA},
+ {"fnmad", FMA (0x19, 0x05), CORE3, ARG_FMA},
+ {"fnmss", FMA (0x19, 0x06), CORE3, ARG_FMA},
+ {"fnmsd", FMA (0x19, 0x07), CORE3, ARG_FMA},
+
+ // fcmov* (SW6) is not needed on sw64.  fsel* and fcmov* take a
+ // different number of operands, so one cannot simply be replaced by
+ // the other: when the FD operand is omitted it must default to FC,
+ // not FA (see the example comment after this group).
+ {"fseleq", FMA (0x19, 0x10), CORE3, ARG_FCMOV},
+ {"fselne", FMA (0x19, 0x11), CORE3, ARG_FCMOV},
+ {"fsellt", FMA (0x19, 0x12), CORE3, ARG_FCMOV},
+ {"fselle", FMA (0x19, 0x13), CORE3, ARG_FCMOV},
+ {"fselgt", FMA (0x19, 0x14), CORE3, ARG_FCMOV},
+ {"fselge", FMA (0x19, 0x15), CORE3, ARG_FCMOV},
+
+ {"vaddw", FP (0x1A, 0x00), CORE3, ARG_FP},
+ {"vaddw", FP (0x1A, 0x20), CORE3, ARG_FPL},
+ {"vsubw", FP (0x1A, 0x01), CORE3, ARG_FP},
+ {"vsubw", FP (0x1A, 0x21), CORE3, ARG_FPL},
+ {"vcmpgew", FP (0x1A, 0x02), CORE3, ARG_FP},
+ {"vcmpgew", FP (0x1A, 0x22), CORE3, ARG_FPL},
+ {"vcmpeqw", FP (0x1A, 0x03), CORE3, ARG_FP},
+ {"vcmpeqw", FP (0x1A, 0x23), CORE3, ARG_FPL},
+ {"vcmplew", FP (0x1A, 0x04), CORE3, ARG_FP},
+ {"vcmplew", FP (0x1A, 0x24), CORE3, ARG_FPL},
+ {"vcmpltw", FP (0x1A, 0x05), CORE3, ARG_FP},
+ {"vcmpltw", FP (0x1A, 0x25), CORE3, ARG_FPL},
+ {"vcmpulew", FP (0x1A, 0x06), CORE3, ARG_FP},
+ {"vcmpulew", FP (0x1A, 0x26), CORE3, ARG_FPL},
+ {"vcmpultw", FP (0x1A, 0x07), CORE3, ARG_FP},
+ {"vcmpultw", FP (0x1A, 0x27), CORE3, ARG_FPL},
+
+ {"vsllw", FP (0x1A, 0x08), CORE3, ARG_FP},
+ {"vsllw", FP (0x1A, 0x28), CORE3, ARG_FPL},
+ {"vsrlw", FP (0x1A, 0x09), CORE3, ARG_FP},
+ {"vsrlw", FP (0x1A, 0x29), CORE3, ARG_FPL},
+ {"vsraw", FP (0x1A, 0x0A), CORE3, ARG_FP},
+ {"vsraw", FP (0x1A, 0x2A), CORE3, ARG_FPL},
+ {"vrolw", FP (0x1A, 0x0B), CORE3, ARG_FP},
+ {"vrolw", FP (0x1A, 0x2B), CORE3, ARG_FPL},
+ {"sllow", FP (0x1A, 0x0C), CORE3, ARG_FP},
+ {"sllow", FP (0x1A, 0x2C), CORE3, ARG_FPL},
+ {"srlow", FP (0x1A, 0x0D), CORE3, ARG_FP},
+ {"srlow", FP (0x1A, 0x2D), CORE3, ARG_FPL},
+ {"vaddl", FP (0x1A, 0x0E), CORE3, ARG_FP},
+ {"vaddl", FP (0x1A, 0x2E), CORE3, ARG_FPL},
+ {"vsubl", FP (0x1A, 0x0F), CORE3, ARG_FP},
+ {"vsubl", FP (0x1A, 0x2F), CORE3, ARG_FPL},
+ {"vsllb", FP (0x1A, 0x10), CORE4A, ARG_FP},
+ {"vsllb", FP (0x1A, 0x30), CORE4A, ARG_FPL},
+ {"vsrlb", FP (0x1A, 0x11), CORE4A, ARG_FP},
+ {"vsrlb", FP (0x1A, 0x31), CORE4A, ARG_FPL},
+ {"vsrab", FP (0x1A, 0x12), CORE4A, ARG_FP},
+ {"vsrab", FP (0x1A, 0x32), CORE4A, ARG_FPL},
+ {"vrolb", FP (0x1A, 0x13), CORE4A, ARG_FP},
+ {"vrolb", FP (0x1A, 0x33), CORE4A, ARG_FPL},
+ {"vsllh", FP (0x1A, 0x14), CORE4A, ARG_FP},
+ {"vsllh", FP (0x1A, 0x34), CORE4A, ARG_FPL},
+ {"vsrlh", FP (0x1A, 0x15), CORE4A, ARG_FP},
+ {"vsrlh", FP (0x1A, 0x35), CORE4A, ARG_FPL},
+ {"vsrah", FP (0x1A, 0x16), CORE4A, ARG_FP},
+ {"vsrah", FP (0x1A, 0x36), CORE4A, ARG_FPL},
+ {"vrolh", FP (0x1A, 0x17), CORE4A, ARG_FP},
+ {"vrolh", FP (0x1A, 0x37), CORE4A, ARG_FPL},
+ {"ctpopow", FP (0x1A, 0x18), CORE3, {FA, ZB, DFC1}},
+ {"ctlzow", FP (0x1A, 0x19), CORE3, {FA, ZB, DFC1}},
+ {"vslll", FP (0x1A, 0x1A), CORE4A, ARG_FP},
+ {"vslll", FP (0x1A, 0x3A), CORE4A, ARG_FPL},
+ {"vsrll", FP (0x1A, 0x1B), CORE4A, ARG_FP},
+ {"vsrll", FP (0x1A, 0x3B), CORE4A, ARG_FPL},
+ {"vsral", FP (0x1A, 0x1C), CORE4A, ARG_FP},
+ {"vsral", FP (0x1A, 0x3C), CORE4A, ARG_FPL},
+ {"vroll", FP (0x1A, 0x1D), CORE4A, ARG_FP},
+ {"vroll", FP (0x1A, 0x3D), CORE4A, ARG_FPL},
+ {"vmaxb", FP (0x1A, 0x1E), CORE4A, ARG_FP},
+ {"vminb", FP (0x1A, 0x1F), CORE4A, ARG_FP},
+ {"vucaddw", FP (0x1A, 0x40), CORE3, ARG_FP},
+ {"vucaddw", FP (0x1A, 0x60), CORE3, ARG_FPL},
+ {"vucsubw", FP (0x1A, 0x41), CORE3, ARG_FP},
+ {"vucsubw", FP (0x1A, 0x61), CORE3, ARG_FPL},
+ {"vucaddh", FP (0x1A, 0x42), CORE3, ARG_FP},
+ {"vucaddh", FP (0x1A, 0x62), CORE3, ARG_FPL},
+ {"vucsubh", FP (0x1A, 0x43), CORE3, ARG_FP},
+ {"vucsubh", FP (0x1A, 0x63), CORE3, ARG_FPL},
+ {"vucaddb", FP (0x1A, 0x44), CORE3, ARG_FP},
+ {"vucaddb", FP (0x1A, 0x64), CORE3, ARG_FPL},
+ {"vucsubb", FP (0x1A, 0x45), CORE3, ARG_FP},
+ {"vucsubb", FP (0x1A, 0x65), CORE3, ARG_FPL},
+ {"sraow", FP (0x1A, 0x46), CORE4A, ARG_FP},
+ {"sraow", FP (0x1A, 0x66), CORE4A, ARG_FPL},
+ {"vsumw", FP (0x1A, 0x47), CORE4A, {FA, ZB, DFC1}},
+ {"vsuml", FP (0x1A, 0x48), CORE4A, {FA, ZB, DFC1}},
+ {"vcmpueqb", FP (0x1A, 0x4B), CORE4A, ARG_FP},
+ {"vcmpueqb", FP (0x1A, 0x6B), CORE4A, ARG_FPL},
+ {"vcmpugtb", FP (0x1A, 0x4C), CORE4A, ARG_FP},
+ {"vcmpugtb", FP (0x1A, 0x6C), CORE4A, ARG_FPL},
+ {"vmaxh", FP (0x1A, 0x50), CORE4A, ARG_FP},
+ {"vminh", FP (0x1A, 0x51), CORE4A, ARG_FP},
+ {"vmaxw", FP (0x1A, 0x52), CORE4A, ARG_FP},
+ {"vminw", FP (0x1A, 0x53), CORE4A, ARG_FP},
+ {"vmaxl", FP (0x1A, 0x54), CORE4A, ARG_FP},
+ {"vminl", FP (0x1A, 0x55), CORE4A, ARG_FP},
+ {"vumaxb", FP (0x1A, 0x56), CORE4A, ARG_FP},
+ {"vuminb", FP (0x1A, 0x57), CORE4A, ARG_FP},
+ {"vumaxh", FP (0x1A, 0x58), CORE4A, ARG_FP},
+ {"vuminh", FP (0x1A, 0x59), CORE4A, ARG_FP},
+ {"vumaxw", FP (0x1A, 0x5A), CORE4A, ARG_FP},
+ {"vuminw", FP (0x1A, 0x5B), CORE4A, ARG_FP},
+ {"vumaxl", FP (0x1A, 0x5C), CORE4A, ARG_FP},
+ {"vuminl", FP (0x1A, 0x5D), CORE4A, ARG_FP},
+ {"vsm3msw", FP (0x1A, 0x4D), CORE4A, ARG_FP},
+ {"vsm4key", FP (0x1A, 0x68), CORE4A, ARG_FPL},
+ {"vsm4r", FP (0x1A, 0x49), CORE4A, ARG_FP},
+ {"vbinvw", FP (0x1A, 0x4A), CORE4A, ARG_FPZ1},
+ {"vadds", FP (0x1A, 0x80), CORE3, ARG_FP},
+ {"v4adds", FP (0x1A, 0x80), CORE3, ARG_FP}, /* pseudo CORE3 SIMD*/
+ {"vaddd", FP (0x1A, 0x81), CORE3, ARG_FP},
+ {"v4addd", FP (0x1A, 0x81), CORE3, ARG_FP}, /* pseudo CORE3 SIMD*/
+ {"vsubs", FP (0x1A, 0x82), CORE3, ARG_FP},
+ {"v4subs", FP (0x1A, 0x82), CORE3, ARG_FP}, /* pseudo CORE3 SIMD*/
+ {"vsubd", FP (0x1A, 0x83), CORE3, ARG_FP},
+ {"v4subd", FP (0x1A, 0x83), CORE3, ARG_FP}, /* pseudo CORE3 SIMD*/
+ {"vmuls", FP (0x1A, 0x84), CORE3, ARG_FP},
+ {"v4muls", FP (0x1A, 0x84), CORE3, ARG_FP}, /* pseudo CORE3 SIMD*/
+ {"vmuld", FP (0x1A, 0x85), CORE3, ARG_FP},
+ {"v4muld", FP (0x1A, 0x85), CORE3, ARG_FP}, /* pseudo CORE3 SIMD*/
+ {"vdivs", FP (0x1A, 0x86), CORE3, ARG_FP},
+ {"vdivd", FP (0x1A, 0x87), CORE3, ARG_FP},
+ {"vsqrts", FP (0x1A, 0x88), CORE3, ARG_FPZ1},
+ {"vsqrtd", FP (0x1A, 0x89), CORE3, ARG_FPZ1},
+ {"vfcmpeq", FP (0x1A, 0x8C), CORE3, ARG_FP},
+ {"vfcmple", FP (0x1A, 0x8D), CORE3, ARG_FP},
+ {"vfcmplt", FP (0x1A, 0x8E), CORE3, ARG_FP},
+ {"vfcmpun", FP (0x1A, 0x8F), CORE3, ARG_FP},
+ {"vcpys", FP (0x1A, 0x90), CORE3, ARG_FP},
+ {"vfmov", FP (0x1A, 0x90), CORE3, {FA, RBA, FC}}, // V1.1
+ {"vcpyse", FP (0x1A, 0x91), CORE3, ARG_FP}, // CORE3 1.0
+ {"vcpysn", FP (0x1A, 0x92), CORE3, ARG_FP}, // CORE3 1.0
+ {"vsums", FP (0x1A, 0x93), CORE4A, {FA, ZB, DFC1}},
+ {"vsumd", FP (0x1A, 0x94), CORE4A, {FA, ZB, DFC1}},
+ {"vfcvtsd", FP (0x1A, 0x95), CORE4A, ARG_FPZ1},
+ {"vfcvtds", FP (0x1A, 0x96), CORE4A, ARG_FPZ1},
+ {"vfcvtls", FP (0x1A, 0x99), CORE4A, ARG_FPZ1},
+ {"vfcvtld", FP (0x1A, 0x9A), CORE4A, ARG_FPZ1},
+ {"vfcvtdl", FP (0x1A, 0x9B), CORE4A, ARG_FPZ1},
+ {"vfcvtdl_g", FP (0x1A, 0x9C), CORE4A, ARG_FPZ1},
+ {"vfcvtdl_p", FP (0x1A, 0x9D), CORE4A, ARG_FPZ1},
+ {"vfcvtdl_z", FP (0x1A, 0x9E), CORE4A, ARG_FPZ1},
+ {"vfcvtdl_n", FP (0x1A, 0x9F), CORE4A, ARG_FPZ1},
+ {"vfris", FP (0x1A, 0xA0), CORE4A, ARG_FPZ1},
+ {"vfris_g", FP (0x1A, 0xA1), CORE4A, ARG_FPZ1},
+ {"vfris_p", FP (0x1A, 0xA2), CORE4A, ARG_FPZ1},
+ {"vfris_z", FP (0x1A, 0xA3), CORE4A, ARG_FPZ1},
+ {"vfris_n", FP (0x1A, 0xA4), CORE4A, ARG_FPZ1},
+ {"vfrid", FP (0x1A, 0xA5), CORE4A, ARG_FPZ1},
+ {"vfrid_g", FP (0x1A, 0xA6), CORE4A, ARG_FPZ1},
+ {"vfrid_p", FP (0x1A, 0xA7), CORE4A, ARG_FPZ1},
+ {"vfrid_z", FP (0x1A, 0xA8), CORE4A, ARG_FPZ1},
+ {"vfrid_n", FP (0x1A, 0xA9), CORE4A, ARG_FPZ1},
+ {"vfrecs", FP (0x1A, 0xAA), CORE4A, {FA, ZB, DFC1}},
+ {"vfrecd", FP (0x1A, 0xAB), CORE4A, {FA, ZB, DFC1}},
+ {"vmaxs", FP (0x1A, 0xAC), CORE4A, ARG_FP},
+ {"vmins", FP (0x1A, 0xAD), CORE4A, ARG_FP},
+ {"vmaxd", FP (0x1A, 0xAE), CORE4A, ARG_FP},
+ {"vmind", FP (0x1A, 0xAF), CORE4A, ARG_FP},
+ {"addow", FP (0x1A, 0xB0), CORE4A, ARG_FP},
+ {"addow", FP (0x1A, 0xC0), CORE4A, ARG_FPL},
+ {"subow", FP (0x1A, 0xB1), CORE4A, ARG_FP},
+ {"subow", FP (0x1A, 0xC1), CORE4A, ARG_FPL},
+ {"cmpeqow", FP (0x1A, 0xB2), CORE4A, ARG_FP},
+ {"cmpuleow", FP (0x1A, 0xB3), CORE4A, ARG_FP},
+ {"cmpultow", FP (0x1A, 0xB4), CORE4A, ARG_FP},
+ {"umulow", FP (0x1A, 0xB5), CORE4A, ARG_FP},
+ {"vaesenc", FP (0x1A, 0xB6), CORE4A, ARG_FP},
+ {"vaesencl", FP (0x1A, 0xB7), CORE4A, ARG_FP},
+ {"vaesdec", FP (0x1A, 0xB8), CORE4A, ARG_FP},
+ {"vaesdecl", FP (0x1A, 0xB9), CORE4A, ARG_FP},
+ {"vaessbox", FP (0x1A, 0xBA), CORE4A, {FA, ZB, DFC1}},
+ {"sha1msw", FP (0x1A, 0xBC), CORE4A, ARG_FP},
+ {"sha256msw", FP (0x1A, 0xBD), CORE4A, ARG_FP},
+ {"sha512msl0", FP (0x1A, 0xBE), CORE4A, ARG_FP},
+ {"vmas", FMA (0x1B, 0x00), CORE3, ARG_FMA},
+ {"vmad", FMA (0x1B, 0x01), CORE3, ARG_FMA},
+ {"vmss", FMA (0x1B, 0x02), CORE3, ARG_FMA},
+ {"vmsd", FMA (0x1B, 0x03), CORE3, ARG_FMA},
+ {"vnmas", FMA (0x1B, 0x04), CORE3, ARG_FMA},
+ {"vnmad", FMA (0x1B, 0x05), CORE3, ARG_FMA},
+ {"vnmss", FMA (0x1B, 0x06), CORE3, ARG_FMA},
+ {"vnmsd", FMA (0x1B, 0x07), CORE3, ARG_FMA},
+ {"vfseleq", FMA (0x1B, 0x10), CORE3, ARG_FMA},
+ {"vfsellt", FMA (0x1B, 0x12), CORE3, ARG_FMA},
+ {"vfselle", FMA (0x1B, 0x13), CORE3, ARG_FMA},
+ {"vseleqw", FMA (0x1B, 0x18), CORE3, ARG_FMA},
+ {"vseleqw", FMA (0x1B, 0x38), CORE3, ARG_FMAL},
+ {"vsellbcw", FMA (0x1B, 0x19), CORE3, ARG_FMA},
+ {"vsellbcw", FMA (0x1B, 0x39), CORE3, ARG_FMAL},
+ {"vselltw", FMA (0x1B, 0x1A), CORE3, ARG_FMA},
+ {"vselltw", FMA (0x1B, 0x3A), CORE3, ARG_FMAL},
+ {"vsellew", FMA (0x1B, 0x1B), CORE3, ARG_FMA},
+ {"vsellew", FMA (0x1B, 0x3B), CORE3, ARG_FMAL},
+ {"sha1r", FMA (0x1B, 0x1E), CORE4A, ARG_FMAL},
+ {"sha256r", FMA (0x1B, 0x1F), CORE4A, ARG_FMAL},
+ {"vinsw", FMA (0x1B, 0x20), CORE3, ARG_FMAL},
+ {"vinsf", FMA (0x1B, 0x21), CORE3, ARG_FMAL},
+ {"vextw", FMA (0x1B, 0x22), CORE3, {FA, FMALIT, DFC1}},
+ {"vextf", FMA (0x1B, 0x23), CORE3, {FA, FMALIT, DFC1}},
+ {"vcpyw", FMA (0x1B, 0x24), CORE3, {FA, DFC1}},
+ {"vcpyf", FMA (0x1B, 0x25), CORE3, {FA, DFC1}},
+ {"vconw", FMA (0x1B, 0x26), CORE3, ARG_FMA},
+ {"vshfw", FMA (0x1B, 0x27), CORE3, ARG_FMA},
+ {"vcons", FMA (0x1B, 0x28), CORE3, ARG_FMA},
+ {"vcond", FMA (0x1B, 0x29), CORE3, ARG_FMA},
+ {"vinsb", FMA (0x1B, 0x2A), CORE4A, ARG_FMAL},
+ {"vinsh", FMA (0x1B, 0x2B), CORE4A, ARG_FMAL},
+ {"vinsectlh", FMA (0x1B, 0x2C), CORE4A, {FA, FB, ZC2, DFC1}},
+ {"vinsectlw", FMA (0x1B, 0x2D), CORE4A, {FA, FB, ZC2, DFC1}},
+ {"vinsectll", FMA (0x1B, 0x2E), CORE4A, {FA, FB, ZC2, DFC1}},
+ {"vinsectlb", FMA (0x1B, 0x2F), CORE4A, {FA, FB, ZC2, DFC1}},
+ {"vshfq", FMA (0x1B, 0x30), CORE4A, ARG_FMAL},
+ {"vshfqb", FMA (0x1B, 0x31), CORE4A, {FA, FB, ZC2, DFC1}},
+ {"vcpyb", FMA (0x1B, 0x32), CORE4A, {FA, DFC1}},
+ {"vcpyh", FMA (0x1B, 0x33), CORE4A, {FA, DFC1}},
+ {"vsm3r", FMA (0x1B, 0x34), CORE4A, ARG_FMAL},
+ {"vfcvtsh", FMA (0x1B, 0x35), CORE4A, ARG_FMAL},
+ {"vfcvths", FMA (0x1B, 0x36), CORE4A, {FA, FMALIT, FC}},
+ {"fcvtsh", FMA (0x1B, 0x37), CORE4A, {FA, FB, FMALIT, DFC1}},
+ {"sha512msl1", FMA (0x1B, 0x3C), CORE4A, ARG_FMA},
+ {"sha512r0", FMA (0x1B, 0x3D), CORE4A, ARG_FMA},
+ {"sha512r1", FMA (0x1B, 0x3E), CORE4A, ARG_FMA},
+ {"vldw_u", ATMEM (0x1C, 0x0), CORE3, ARG_VUAMEM},
+ {"vstw_u", ATMEM (0x1C, 0x1), CORE3, ARG_VUAMEM},
+ {"vlds_u", ATMEM (0x1C, 0x2), CORE3, ARG_VUAMEM},
+ {"vsts_u", ATMEM (0x1C, 0x3), CORE3, ARG_VUAMEM},
+ {"vldd_u", ATMEM (0x1C, 0x4), CORE3, ARG_VUAMEM},
+ {"vstd_u", ATMEM (0x1C, 0x5), CORE3, ARG_VUAMEM},
+ {"vstw_ul", ATMEM (0x1C, 0x8), CORE3, ARG_VUAMEM},
+ {"vstw_uh", ATMEM (0x1C, 0x9), CORE3, ARG_VUAMEM},
+ {"vsts_ul", ATMEM (0x1C, 0xA), CORE3, ARG_VUAMEM},
+ {"vsts_uh", ATMEM (0x1C, 0xB), CORE3, ARG_VUAMEM},
+ {"vstd_ul", ATMEM (0x1C, 0xC), CORE3, ARG_VUAMEM},
+ {"vstd_uh", ATMEM (0x1C, 0xD), CORE3, ARG_VUAMEM},
+ {"vldd_nc", ATMEM (0x1C, 0xE), CORE3, ARG_VUAMEM},
+ {"vstd_nc", ATMEM (0x1C, 0xF), CORE3, ARG_VUAMEM},
+ {"lbr", BRA (0x1D), CORE4A, {BDISP26}},
+ {"ldbu_a", ATMEM (0x1E, 0x0), CORE4A, ARG_ATMEM},
+ {"ldhu_a", ATMEM (0x1E, 0x1), CORE4A, ARG_ATMEM},
+ {"ldw_a", ATMEM (0x1E, 0x2), CORE4A, ARG_ATMEM},
+ {"ldl_a", ATMEM (0x1E, 0x3), CORE4A, ARG_ATMEM},
+ {"flds_a", ATMEM (0x1E, 0x4), CORE4A, ARG_VUAMEM},
+ {"fldd_a", ATMEM (0x1E, 0x5), CORE4A, ARG_VUAMEM},
+ {"stb_a", ATMEM (0x1E, 0x6), CORE4A, ARG_ATMEM},
+ {"sth_a", ATMEM (0x1E, 0x7), CORE4A, ARG_ATMEM},
+ {"stw_a", ATMEM (0x1E, 0x8), CORE4A, ARG_ATMEM},
+ {"stl_a", ATMEM (0x1E, 0x9), CORE4A, ARG_ATMEM},
+ {"fsts_a", ATMEM (0x1E, 0xA), CORE4A, ARG_VUAMEM},
+ {"fstd_a", ATMEM (0x1E, 0xB), CORE4A, ARG_VUAMEM},
+ {"dpfhr", ATMEM (0x1E, 0xE), CORE4A, {DPFTH, ATMDISP, PRB}},
+ {"dpfhw", ATMEM (0x1E, 0xF), CORE4A, {DPFTH, ATMDISP, PRB}},
+ {"flushd", MEM (0x20), CORE3, ARG_PREFETCH},
+ {"ldbu", MEM (0x20), CORE3, ARG_MEM},
+ {"evictdg", MEM (0x21), CORE3, ARG_PREFETCH},
+ {"ldhu", MEM (0x21), CORE3, ARG_MEM},
+ {"s_fillcs", MEM (0x22), CORE3, ARG_PREFETCH},
+ {"ldw", MEM (0x22), CORE3, ARG_MEM},
+ {"wh64", MFC (0x22, 0xF800), CORE3, {ZA, PRB}},
+ {"s_fillde", MEM (0x23), CORE3, ARG_PREFETCH},
+ {"ldl", MEM (0x23), CORE3, ARG_MEM},
+ {"evictdl", MEM (0x24), CORE3, ARG_PREFETCH},
+ {"ldl_u", MEM (0x24), CORE3, ARG_MEM},
+ {"pri_ldw/p", SW6HWMEM (0x25, 0x0), CORE3, ARG_SW6HWMEM},
+ {"pri_ldw_inc/p", SW6HWMEM (0x25, 0x2), CORE3, ARG_SW6HWMEM},
+ {"pri_ldw_dec/p", SW6HWMEM (0x25, 0x4), CORE3, ARG_SW6HWMEM},
+ {"pri_ldw_set/p", SW6HWMEM (0x25, 0x6), CORE3, ARG_SW6HWMEM},
+ {"pri_ldw/v", SW6HWMEM (0x25, 0x8), CORE3, ARG_SW6HWMEM},
+ {"pri_ldw/vpte", SW6HWMEM (0x25, 0xA), CORE3, ARG_SW6HWMEM},
+ {"pri_ldl/p", SW6HWMEM (0x25, 0x1), CORE3, ARG_SW6HWMEM},
+ {"pri_ldl_inc/p", SW6HWMEM (0x25, 0x3), CORE3, ARG_SW6HWMEM},
+ {"pri_ldl_dec/p", SW6HWMEM (0x25, 0x5), CORE3, ARG_SW6HWMEM},
+ {"pri_ldl_set/p", SW6HWMEM (0x25, 0x7), CORE3, ARG_SW6HWMEM},
+ {"pri_ldl/v", SW6HWMEM (0x25, 0x9), CORE3, ARG_SW6HWMEM},
+ {"pri_ldl/vpte", SW6HWMEM (0x25, 0xB), CORE3, ARG_SW6HWMEM},
+ {"fillde", MEM (0x26), CORE3, ARG_PREFETCH},
+ {"flds", MEM (0x26), CORE3, ARG_FMEM},
+ {"fillde_e", MEM (0x27), CORE3, ARG_PREFETCH},
+ {"fldd", MEM (0x27), CORE3, ARG_FMEM},
+
+ {"stb", MEM (0x28), CORE3, ARG_MEM},
+ {"sth", MEM (0x29), CORE3, ARG_MEM},
+ {"stw", MEM (0x2A), CORE3, ARG_MEM},
+ {"stl", MEM (0x2B), CORE3, ARG_MEM},
+ {"stl_u", MEM (0x2C), CORE3, ARG_MEM},
+ {"pri_stw/p", SW6HWMEM (0x2D, 0x0), CORE3, ARG_SW6HWMEM},
+ {"pri_stw/v", SW6HWMEM (0x2D, 0x8), CORE3, ARG_SW6HWMEM},
+ {"pri_stl/p", SW6HWMEM (0x2D, 0x1), CORE3, ARG_SW6HWMEM},
+ {"pri_stl/v", SW6HWMEM (0x2D, 0x9), CORE3, ARG_SW6HWMEM},
+ {"fsts", MEM (0x2E), CORE3, ARG_FMEM},
+ {"fstd", MEM (0x2F), CORE3, ARG_FMEM},
+ {"beq", BRA (0x30), CORE3, ARG_BRA},
+ {"bne", BRA (0x31), CORE3, ARG_BRA},
+ {"blt", BRA (0x32), CORE3, ARG_BRA},
+ {"ble", BRA (0x33), CORE3, ARG_BRA},
+ {"bgt", BRA (0x34), CORE3, ARG_BRA},
+ {"bge", BRA (0x35), CORE3, ARG_BRA},
+ {"blbc", BRA (0x36), CORE3, ARG_BRA},
+ {"blbs", BRA (0x37), CORE3, ARG_BRA},
+
+ {"fbeq", BRA (0x38), CORE3, ARG_FBRA},
+ {"fbne", BRA (0x39), CORE3, ARG_FBRA},
+ {"fblt", BRA (0x3A), CORE3, ARG_FBRA},
+ {"fble", BRA (0x3B), CORE3, ARG_FBRA},
+ {"fbgt", BRA (0x3C), CORE3, ARG_FBRA},
+ {"fbge", BRA (0x3D), CORE3, ARG_FBRA},
+ {"ldi", MEM (0x3E), CORE3, {RA, MDISP, ZB}},
+ {"ldi", MEM (0x3E), CORE3, ARG_MEM},
+ {"ldih", MEM (0x3F), CORE3, {RA, MDISP, ZB}},
+ {"ldih", MEM (0x3F), CORE3, ARG_MEM},
+ {"unop", MEM_ (0x3F) | (30 << 16), MEM_MASK, CORE3, {ZA}},
+};
+
+const unsigned sw_64_num_opcodes
+ = sizeof (sw_64_opcodes) / sizeof (*sw_64_opcodes);