diff -Naur '--exclude=*.swp' gcc-6.2.0/config.sub gcc-6.2.0-zip/config.sub
--- gcc-6.2.0/config.sub 2015-12-31 16:13:28.000000000 -0500
+++ gcc-6.2.0-zip/config.sub 2017-01-11 11:07:21.116065311 -0500
@@ -355,6 +355,14 @@
xscaleel)
basic_machine=armel-unknown
;;
+ zip-*-linux*)
+ basic_machine=zip
+ os=-linux
+ ;;
+ zip*)
+ basic_machine=zip-unknown
+ os=-none
+ ;;
# We use `pc' rather than `unknown'
# because (1) that's what they normally are, and
diff -Naur '--exclude=*.swp' gcc-6.2.0/configure gcc-6.2.0-zip/configure
--- gcc-6.2.0/configure 2016-03-17 18:54:19.000000000 -0400
+++ gcc-6.2.0-zip/configure 2017-02-06 21:54:22.244807700 -0500
@@ -3548,6 +3548,44 @@
ft32-*-*)
noconfigdirs="$noconfigdirs ${libgcj}"
;;
+ zip*)
+ noconfigdirs="$noconfigdirs ${libgcj}"
+ noconfigdirs="$noconfigdirs target-boehm-gc"
+ noconfigdirs="$noconfigdirs target-libgfortran"
+ # noconfigdirs="$noconfigdirs target-libsanitizer"
+ # noconfigdirs="$noconfigdirs target-libada"
+ # noconfigdirs="$noconfigdirs target-libatomic"
+ # noconfigdirs="$noconfigdirs target-libcilkrts"
+ # noconfigdirs="$noconfigdirs target-libitm"
+ # noconfigdirs="$noconfigdirs target-libquadmath"
+ # noconfigdirs="$noconfigdirs target-libstdc++-v3"
+ # noconfigdirs="$noconfigdirs target-libssp"
+ # noconfigdirs="$noconfigdirs target-libgo"
+ # noconfigdirs="$noconfigdirs target-libgomp"
+ # noconfigdirs="$noconfigdirs target-libvtv"
+ # noconfigdirs="$noconfigdirs target-libobjc"
+ # target-libgcc
+ # target-liboffloadmic
+ # target-libmpx # Only gets enabled by request
+ # target-libbacktrace
+ # ${libgcj}
+ # target-boehm-gc
+ # target-libada
+ # target-libatomic
+ # target-libcilkrts
+ # target-libgfortran
+ # target-libgo
+ # target-libgomp
+ # target-libitm
+ # target-libobjc
+ # target-libquadmath
+ # target-libsanitizer
+ # target-libstdc++-v3
+ # target-libssp
+ # target-libvtv
+ # target-libgloss
+ # target-newlib
+ ;;
*-*-lynxos*)
noconfigdirs="$noconfigdirs ${libgcj}"
;;
@@ -3575,6 +3613,9 @@
*-*-aix*)
noconfigdirs="$noconfigdirs target-libgo"
;;
+ zip*)
+ noconfigdirs="$noconfigdirs target-libgo"
+ ;;
esac
fi
@@ -3971,6 +4012,9 @@
vax-*-*)
noconfigdirs="$noconfigdirs target-newlib target-libgloss"
;;
+ zip*)
+ noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof"
+ ;;
esac
# If we aren't building newlib, then don't build libgloss, since libgloss
@@ -6785,16 +6829,16 @@
# CFLAGS_FOR_TARGET and CXXFLAGS_FOR_TARGET.
if test "x$CFLAGS_FOR_TARGET" = x; then
if test "x${is_cross_compiler}" = xyes; then
- CFLAGS_FOR_TARGET="-g -O2"
+ CFLAGS_FOR_TARGET="-O3"
else
CFLAGS_FOR_TARGET=$CFLAGS
case " $CFLAGS " in
- *" -O2 "*) ;;
- *) CFLAGS_FOR_TARGET="-O2 $CFLAGS_FOR_TARGET" ;;
+ *" -O3 "*) ;;
+ *) CFLAGS_FOR_TARGET="-O3 $CFLAGS_FOR_TARGET" ;;
esac
case " $CFLAGS " in
*" -g "* | *" -g3 "*) ;;
- *) CFLAGS_FOR_TARGET="-g $CFLAGS_FOR_TARGET" ;;
+ *) CFLAGS_FOR_TARGET="$CFLAGS_FOR_TARGET" ;;
esac
fi
fi
@@ -6802,16 +6846,16 @@
if test "x$CXXFLAGS_FOR_TARGET" = x; then
if test "x${is_cross_compiler}" = xyes; then
- CXXFLAGS_FOR_TARGET="-g -O2"
+ CXXFLAGS_FOR_TARGET="-O3"
else
CXXFLAGS_FOR_TARGET=$CXXFLAGS
case " $CXXFLAGS " in
- *" -O2 "*) ;;
- *) CXXFLAGS_FOR_TARGET="-O2 $CXXFLAGS_FOR_TARGET" ;;
+ *" -O3 "*) ;;
+ *) CXXFLAGS_FOR_TARGET="-O3 $CXXFLAGS_FOR_TARGET" ;;
esac
case " $CXXFLAGS " in
*" -g "* | *" -g3 "*) ;;
- *) CXXFLAGS_FOR_TARGET="-g $CXXFLAGS_FOR_TARGET" ;;
+ *) CXXFLAGS_FOR_TARGET="$CXXFLAGS_FOR_TARGET" ;;
esac
fi
fi
diff -Naur '--exclude=*.swp' gcc-6.2.0/configure.ac gcc-6.2.0-zip/configure.ac
--- gcc-6.2.0/configure.ac 2016-03-17 18:54:19.000000000 -0400
+++ gcc-6.2.0-zip/configure.ac 2017-01-10 12:43:23.819301273 -0500
@@ -884,6 +884,9 @@
ft32-*-*)
noconfigdirs="$noconfigdirs ${libgcj}"
;;
+ zip*)
+ noconfigdirs="$noconfigdirs ${libgcj}"
+ ;;
*-*-lynxos*)
noconfigdirs="$noconfigdirs ${libgcj}"
;;
@@ -911,6 +914,9 @@
*-*-aix*)
noconfigdirs="$noconfigdirs target-libgo"
;;
+ zip*)
+ noconfigdirs="$noconfigdirs target-libgo"
+ ;;
esac
fi
@@ -1307,6 +1313,10 @@
vax-*-*)
noconfigdirs="$noconfigdirs target-newlib target-libgloss"
;;
+ zip*)
+ noconfigdirs="$noconfigdirs target-libffi target-boehm-gc gdb gprof ${libgcj}"
+ unsupported_languages="$unsupported_languages fortran"
+ ;;
esac
# If we aren't building newlib, then don't build libgloss, since libgloss
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cfgexpand.c gcc-6.2.0-zip/gcc/cfgexpand.c
--- gcc-6.2.0/gcc/cfgexpand.c 2016-04-27 08:23:50.000000000 -0400
+++ gcc-6.2.0-zip/gcc/cfgexpand.c 2016-12-31 16:38:36.195534819 -0500
@@ -74,6 +74,15 @@
#include "tree-chkp.h"
#include "rtl-chkp.h"
+
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void zip_debug_rtx(const_rtx);
+#else
+#define ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
/* Some systems use __main in a way incompatible with its use in gcc, in these
cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
give the same symbol without quotes for an alternative entry point. You
@@ -1172,7 +1181,7 @@
base_align = crtl->max_used_stack_slot_alignment;
else
base_align = MAX (crtl->max_used_stack_slot_alignment,
- GET_MODE_ALIGNMENT (SImode)
+ GET_MODE_ALIGNMENT (word_mode)
<< ASAN_SHADOW_SHIFT);
}
else
@@ -2225,7 +2234,7 @@
data.asan_vec.safe_push (offset);
/* Leave space for alignment if STRICT_ALIGNMENT. */
if (STRICT_ALIGNMENT)
- alloc_stack_frame_space ((GET_MODE_ALIGNMENT (SImode)
+ alloc_stack_frame_space ((GET_MODE_ALIGNMENT (word_mode)
<< ASAN_SHADOW_SHIFT)
/ BITS_PER_UNIT, 1);
@@ -5745,7 +5754,7 @@
&& (last = get_last_insn ())
&& JUMP_P (last))
{
- rtx dummy = gen_reg_rtx (SImode);
+ rtx dummy = gen_reg_rtx (word_mode);
emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
}
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/cgraphbuild.c gcc-6.2.0-zip/gcc/cgraphbuild.c
--- gcc-6.2.0/gcc/cgraphbuild.c 2016-01-04 09:30:50.000000000 -0500
+++ gcc-6.2.0-zip/gcc/cgraphbuild.c 2016-12-31 16:39:44.963107994 -0500
@@ -32,6 +32,15 @@
#include "ipa-utils.h"
#include "except.h"
+
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s;%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void zip_debug_rtx(const_rtx);
+#else
+#define ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
/* Context of record_reference. */
struct record_reference_ctx
{
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/combine.c gcc-6.2.0-zip/gcc/combine.c
--- gcc-6.2.0/gcc/combine.c 2016-08-08 06:06:15.000000000 -0400
+++ gcc-6.2.0-zip/gcc/combine.c 2017-02-03 09:25:19.676720321 -0500
@@ -103,6 +103,15 @@
#include "rtl-iter.h"
#include "print-rtl.h"
+#define DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void zip_debug_rtx(const_rtx);
+#else
+#define ZIP_DEBUG_LINE(STR,RTX)
+#endif
+
#ifndef LOAD_EXTEND_OP
#define LOAD_EXTEND_OP(M) UNKNOWN
#endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/common/config/zip/zip-common.c gcc-6.2.0-zip/gcc/common/config/zip/zip-common.c
--- gcc-6.2.0/gcc/common/config/zip/zip-common.c 1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/common/config/zip/zip-common.c 2017-01-11 09:41:34.483106099 -0500
@@ -0,0 +1,52 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: common/config/zip/zip-common.c
+//
+// Project: Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose: To eliminate the frame register automatically.
+//
+// Creator: Dan Gisselquist, Ph.D.
+// Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program. (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.) If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License: GPL, v3, as defined and found on www.gnu.org,
+// http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+
+static const struct default_options zip_option_optimization_table[] =
+ {
+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+ { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE zip_option_optimization_table
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/genzipops.c gcc-6.2.0-zip/gcc/config/zip/genzipops.c
--- gcc-6.2.0/gcc/config/zip/genzipops.c 1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/genzipops.c 2017-03-07 12:03:59.062584503 -0500
@@ -0,0 +1,444 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: genzipops.c
+//
+// Project: Zip CPU -- a small, lightweight, RISC CPU soft core
+//
+// Purpose: This program generates the zip-ops.md machine description file.
+//
+// While I understand that this is not GCC's preferred method of generating
+// machine description files, there were just so many instructions to
+// generate, and so many forms of them, and the GCC infrastructure didn't
+// support the conditional execution model of the ZipCPU, so ... I built
+// it this way.
+//
+// As of this writing, building zip-ops.md is not an automatic part of
+// making GCC. To build genzipops, just type:
+//
+// g++ genzipops.c -o genzipops
+//
+// And to run it, type:
+//
+// genzipops [zip-ops.md]
+//
+// genzipops writes the machine descriptions to a temporary file, .zip-ops.md,
+// and then renames it to zip-ops.md (or to its single, optional argument).
+//
+//
+// Creator: Dan Gisselquist, Ph.D.
+// Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program. (It's in the $(ROOT)/doc directory. Run make with no
+// target there if the PDF file isn't present.) If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License: GPL, v3, as defined and found on www.gnu.org,
+// http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+//
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+void legal(FILE *fp) {
+ fprintf(fp, ""
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";; Filename: zip-ops.md\n"
+";;\n"
+";; Project: Zip CPU -- a small, lightweight, RISC CPU soft core\n"
+";;\n"
+";; Purpose: This is a computer generated machine description of the\n"
+";; ZipCPU\'s operations. It is computer generated simply for\n"
+";; two reasons. First, I can\'t seem to find a way to generate this\n"
+";; information within GCC\'s current constructs. Specifically, the\n"
+";; CPU\'s instructions normally set the condition codes, unless they\n"
+";; are conditional instructions when they don\'t. Second, the ZipCPU is\n"
+";; actually quite regular. Almost all of the instructions have the same\n"
+";; form. This form turns into many, many RTL instructions. Because the\n"
+";; CPU doesn\'t match any of the others within GCC, that means either\n"
+";; I have a *lot* of cut, copy, paste, and edit to do to create the file\n"
+";; and upon any and every edit, or I need to build a program to generate\n"
+";; the remaining .md constructs. Hence, I chose the latter to minimize\n"
+";; the amount of work I needed to do.\n"
+";;\n"
+";;\n"
+";; Creator: Dan Gisselquist, Ph.D.\n"
+";; Gisselquist Technology, LLC\n"
+";;\n"
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";; Copyright (C) 2017, Gisselquist Technology, LLC\n"
+";;\n"
+";; This program is free software (firmware): you can redistribute it and/or\n"
+";; modify it under the terms of the GNU General Public License as published\n"
+";; by the Free Software Foundation, either version 3 of the License, or (at\n"
+";; your option) any later version.\n"
+";;\n"
+";; This program is distributed in the hope that it will be useful, but WITHOUT\n"
+";; ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY or\n"
+";; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\n"
+";; for more details.\n"
+";;\n"
+";; License: GPL, v3, as defined and found on www.gnu.org,\n"
+";; http://www.gnu.org/licenses/gpl.html\n"
+";;\n"
+";;\n"
+";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n"
+";;\n"
+";;\n");
+}
+
+void gen_heading(FILE *fp, const char *heading) {
+ fprintf(fp, ";\n;\n; %s\n;\n;\n", heading);
+}
+
+void genzip_condop(FILE *fp, const char *md_opname,
+ const char *rtxstr, const char *insn_cond,
+ const char *zip_op,
+ const char *rtx_cond, const char *zip_cond) {
+
+ fprintf(fp, "(define_insn \"%s_%s\"\n"
+ "\t[(cond_exec (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t%s)]\n"
+ "\t\"%s\"\t; Condition\n"
+ "\t\"%s.%s\\t%%1,%%0\t; genzip, conditional operator\"\t; Template\n"
+ "\t[(set_attr \"predicable\" \"no\") "
+ "(set_attr \"ccresult\" \"unchanged\")])\n;\n;\n",
+ md_opname, rtx_cond, rtx_cond, rtxstr, insn_cond, zip_op, zip_cond);
+
+}
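+// As an illustrative example: a call such as
+//	genzip_condop(fp, "addsi3", <dup-pattern>, "", "ADD", "eq", "Z")
+// emits a define_insn named "addsi3_eq", whose RTL wraps the operation in
+// (cond_exec (eq (reg:CC CC_REG) (const_int 0)) ...), and whose output
+// template is "ADD.Z\t%1,%0", i.e. the conditionally executed form of the
+// instruction, leaving the condition codes unchanged.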
+
+void genzipop_long(FILE *fp, const char *md_opname, const char *uncond_rtx, const char *insn_cond, const char *split_rtx, const char *dup_rtx, const char *zip_op) {
+ char heading[128];
+ sprintf(heading, "%s (genzipop_long)", zip_op);
+ fprintf(fp, ";\n;\n;\n; %s (genzipop_long)\n;\n;\n;\n", zip_op);
+
+ fprintf(fp, "(define_insn \"%s\"\n"
+"\t[%s\n"
+"\t(clobber (reg:CC CC_REG))]\n"
+"\t\"%s\"\n"
+"\t\"%s\\t%%2,%%0\t; %s\"\n"
+"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"set\")])\n;\n;\n",
+ md_opname, uncond_rtx, insn_cond, zip_op, md_opname);
+
+
+ fprintf(fp, "(define_insn \"%s_raw\"\n"
+"\t[%s\n"
+"\t(set (reg:CC CC_REG) (compare:CC (match_dup 0) (const_int 0)))]\n"
+"\t\"%s\"\n"
+"\t\"%s\\t%%1,%%0\t; %s_raw\"\n"
+"\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"set\")])\n;\n;\n",
+ md_opname, dup_rtx, insn_cond, zip_op, md_opname);
+
+ genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "eq", "Z");
+ genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ne", "NZ");
+ genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "lt", "LT");
+ genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ge", "GE");
+ genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "ltu", "C");
+ genzip_condop(fp, md_opname, dup_rtx, insn_cond, zip_op, "geu", "NC");
+}
+
+void genzipop(FILE *fp, const char *md_opname, const char *rtx_name, const char *insn_cond, const char *zip_op) {
+ char rtxstr[512], splitstr[512], dupstr[512], altname[64];
+
+ sprintf(rtxstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_operand:SI 1 \"register_operand\" \"0\")\n"
+"\t\t\t(match_operand:SI 2 \"zip_opb_single_operand_p\" \"rO\")))", rtx_name);
+ sprintf(splitstr,
+ "(set (match_dup 0) (%s (match_dup 0) (match_dup 2)))", rtx_name);
+
+ sprintf(dupstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_dup 0)\n"
+"\t\t\t(match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\")))", rtx_name);
+
+ genzipop_long(fp, md_opname, rtxstr, insn_cond, splitstr, dupstr, zip_op);
+
+ sprintf(rtxstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_operand:SI 1 \"register_operand\" \"0\")\n"
+"\t\t\t(plus:SI (match_operand:SI 2 \"register_operand\" \"r\")\n"
+"\t\t\t\t(match_operand:SI 3 \"const_int_operand\" \"N\"))))", rtx_name);
+ sprintf(splitstr,
+ "(set (match_dup 0) (%s (match_dup 0)\n"
+"\t\t\t(plus:SI (match_dup 2) (match_dup 3))))", rtx_name);
+
+ sprintf(dupstr,
+"(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+"\t\t(%s (match_dup 0)\n"
+"\t\t\t(plus:SI (match_operand:SI 1 \"register_operand\" \"r\")\n"
+"\t\t\t\t(match_operand:SI 2 \"const_int_operand\" \"N\"))))", rtx_name);
+
+ sprintf(altname, "%s_off", md_opname);
+
+ genzipop_long(fp, altname, rtxstr, insn_cond, splitstr, dupstr, zip_op);
+}
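+// For instance, genzipop(fp, "addsi3", "plus:SI", "", "ADD") emits the
+// "addsi3" and "addsi3_raw" patterns plus their six conditional variants
+// (addsi3_eq through addsi3_geu), and then repeats the whole family as
+// "addsi3_off" for the register-plus-constant-offset operand form.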
+
+void gencmov(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"=r,r,r,Q\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(match_operand:SI 1 \"general_operand\" \"r,Q,i,r\")\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"@\n"
+ "\tMOV.%s\t%%1,%%0\t; cmov\n"
+ "\tLW.%s\t%%1,%%0\t; cmov\n"
+ "\tLDI.%s\t%%1,%%0\t; cmov\n"
+ "\tSW.%s\t%%1,%%0\t; cmov\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond, zip_cond, zip_cond, zip_cond);
+
+}
+
+void gencadd(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(plus:SI (match_dup 0)\n"
+ "\t\t\t\t(match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"ADD.%s\t%%1,%%0\t; cadd\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond);
+}
+
+void gencnot(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(xor:SI (match_dup 0)\n"
+ "\t\t\t\t(const_int -1))\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"NOT.%s\t%%0\t; cnot\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond);
+}
+
+void gencneg(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(neg:SI (match_dup 0))\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"NEG.%s\t%%0\t; cneg\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond);
+}
+
+
+void gencand(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(and:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"AND.%s\t%%1,%%0\t; cand\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond);
+}
+
+
+void gencior(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(ior:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"OR.%s\t%%1,%%0\t; cior\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond);
+}
+
+void gencxor(FILE *fp, const char *md_opname, const char *md_cond, const char *zip_cond) {
+ fprintf(fp, ";\n;\n"
+"(define_insn \"%s_%s\"\n"
+ "\t[(set (match_operand:SI 0 \"register_operand\" \"+r\")\n"
+ "\t\t(if_then_else:SI (%s (reg:CC CC_REG) (const_int 0))\n"
+ "\t\t\t(xor:SI (match_dup 0) (match_operand:SI 1 \"zip_opb_single_operand_p\" \"rO\"))\n"
+ "\t\t\t(match_dup 0)))]\n"
+ "\t\"\"\n"
+ "\t\"XOR.%s\t%%1,%%0\t; cxor\"\n"
+ "\t[(set_attr \"predicable\" \"no\") (set_attr \"ccresult\" \"unchanged\")])\n",
+ md_opname, md_cond, md_cond, zip_cond);
+}
+
+void usage(void) {
+ printf("USAGE: genzipops <new-zip-ops.md filename>\n");
+}
+
+const char *TMPPATH = ".zip-ops.md";
+const char *TAILPATH = "zip-ops.md";
+
+int main(int argc, char **argv) {
+ FILE *fp = fopen(TMPPATH, "w");
+ const char *newname = TAILPATH;
+
+ if ((argc>1)&&(argv[1][0] == '-')) {
+ usage();
+ exit(EXIT_FAILURE);
+ }
+
+ if (argc>1) {
+ if ((strlen(argv[1])>=strlen(TAILPATH))
+ &&(strcmp(&argv[1][strlen(argv[1])-strlen(TAILPATH)],
+ TAILPATH)==0)
+ &&(access(argv[1], F_OK)==0))
+ unlink(argv[1]);
+ newname = argv[1];
+ }
+
+ legal(fp);
+ genzipop(fp, "addsi3", "plus:SI", "", "ADD");
+ genzipop(fp, "subsi3", "minus:SI", "", "SUB");
+ genzipop(fp, "mulsi3", "mult:SI", "", "MPY");
+ genzipop(fp, "divsi3", "div:SI", "(ZIP_DIVIDE)", "DIVS");
+ genzipop(fp, "udivsi3", "udiv:SI", "(ZIP_DIVIDE)", "DIVU");
+ genzipop(fp, "andsi3", "and:SI", "", "AND");
+ genzipop(fp, "iorsi3", "ior:SI", "", "OR");
+ genzipop(fp, "xorsi3", "xor:SI", "", "XOR");
+ genzipop(fp, "ashrsi3", "ashiftrt:SI","", "ASR");
+ genzipop(fp, "ashlsi3", "ashift:SI", "", "LSL");
+ genzipop(fp, "lshrsi3", "lshiftrt:SI","", "LSR");
+
+ genzipop_long(fp, "smulsi_highpart",
+ "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+ "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+ "\t\t\t(sign_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+ "\t\t\t(sign_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+ "\t\t\t(const_int 32))))",
+ "(ZIP_HAS_DI)",
+ "(set (match_dup 0)\n"
+ "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+ "\t\t\t(sign_extend:DI (match_dup 1))\n"
+ "\t\t\t(sign_extend:DI (match_dup 2)))\n"
+ "\t\t\t(const_int 32))))",
+ //
+ "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+ "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+ "\t\t\t(sign_extend:DI (match_dup 0))\n"
+ "\t\t\t(sign_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+ "\t\t\t(const_int 32))))",
+ "MPYSHI");
+ genzipop_long(fp, "umulsi_highpart",
+ "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+ "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+ "\t\t\t(zero_extend:DI (match_operand:SI 1 \"register_operand\" \"0\"))\n"
+ "\t\t\t(zero_extend:DI (match_operand:SI 2 \"zip_opb_operand_p\" \"rO\")))\n"
+ "\t\t\t(const_int 32))))",
+ "(ZIP_HAS_DI)",
+ "(set (match_dup 0)\n"
+ "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+ "\t\t\t(zero_extend:DI (match_dup 1))\n"
+ "\t\t\t(zero_extend:DI (match_dup 2)))\n"
+ "\t\t\t(const_int 32))))",
+ //
+ "(set (match_operand:SI 0 \"register_operand\" \"=r\")\n"
+ "\t\t(truncate:SI (ashiftrt:DI (mult:DI\n"
+ "\t\t\t(zero_extend:DI (match_dup 0))\n"
+ "\t\t\t(zero_extend:DI (match_operand:SI 1 \"zip_opb_operand_p\" \"rO\")))\n"
+ "\t\t\t(const_int 32))))",
+ "MPYUHI");
+
+ gen_heading(fp, "Conditional move instructions");
+
+ gencmov(fp, "cmov", "eq", "Z");
+ gencmov(fp, "cmov", "ne", "NZ");
+ gencmov(fp, "cmov", "lt", "LT");
+ gencmov(fp, "cmov", "ge", "GE");
+ gencmov(fp, "cmov", "ltu", "C");
+ gencmov(fp, "cmov", "geu", "NC");
+
+ gen_heading(fp, "Conditional add instructions");
+
+ gencadd(fp, "cadd", "eq", "Z");
+ gencadd(fp, "cadd", "ne", "NZ");
+ gencadd(fp, "cadd", "lt", "LT");
+ gencadd(fp, "cadd", "ge", "GE");
+ gencadd(fp, "cadd", "ltu", "C");
+ gencadd(fp, "cadd", "geu", "NC");
+
+ gen_heading(fp, "Conditional negate instructions");
+
+ gencneg(fp, "cneg", "eq", "Z");
+ gencneg(fp, "cneg", "ne", "NZ");
+ gencneg(fp, "cneg", "lt", "LT");
+ gencneg(fp, "cneg", "ge", "GE");
+ gencneg(fp, "cneg", "ltu", "C");
+ gencneg(fp, "cneg", "geu", "NC");
+
+ gen_heading(fp, "Conditional not instructions");
+
+ gencnot(fp, "cnot", "eq", "Z");
+ gencnot(fp, "cnot", "ne", "NZ");
+ gencnot(fp, "cnot", "lt", "LT");
+ gencnot(fp, "cnot", "ge", "GE");
+ gencnot(fp, "cnot", "ltu", "C");
+ gencnot(fp, "cnot", "geu", "NC");
+
+ gen_heading(fp, "Conditional and instructions");
+
+ gencand(fp, "cand", "eq", "Z");
+ gencand(fp, "cand", "ne", "NZ");
+ gencand(fp, "cand", "lt", "LT");
+ gencand(fp, "cand", "ge", "GE");
+ gencand(fp, "cand", "ltu", "C");
+ gencand(fp, "cand", "geu", "NC");
+
+ gen_heading(fp, "Conditional ior instructions");
+
+ gencior(fp, "cior", "eq", "Z");
+ gencior(fp, "cior", "ne", "NZ");
+ gencior(fp, "cior", "lt", "LT");
+ gencior(fp, "cior", "ge", "GE");
+ gencior(fp, "cior", "ltu", "C");
+ gencior(fp, "cior", "geu", "NC");
+
+ gen_heading(fp, "Conditional xor instructions");
+
+ gencxor(fp, "cxor", "eq", "Z");
+ gencxor(fp, "cxor", "ne", "NZ");
+ gencxor(fp, "cxor", "lt", "LT");
+ gencxor(fp, "cxor", "ge", "GE");
+ gencxor(fp, "cxor", "ltu", "C");
+ gencxor(fp, "cxor", "geu", "NC");
+
+ fclose(fp);
+
+ if (rename(TMPPATH, newname) != 0) {
+ fprintf(stderr, "ERR: Could not create %s, leaving results in %s\n", newname, TMPPATH);
+ exit(EXIT_FAILURE);
+ } exit(EXIT_SUCCESS);
+}
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip.c gcc-6.2.0-zip/gcc/config/zip/zip.c
--- gcc-6.2.0/gcc/config/zip/zip.c 1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip.c 2017-03-07 12:03:18.566583672 -0500
@@ -0,0 +1,2679 @@
+////////////////////////////////////////////////////////////////////////////////
+//
+// Filename: zip.c
+//
+// Project: Zip CPU backend for the GNU Compiler Collection
+//
+// Purpose:
+//
+// Creator: Dan Gisselquist, Ph.D.
+// Gisselquist Technology, LLC
+//
+////////////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2016-2017, Gisselquist Technology, LLC
+//
+// This program is free software (firmware): you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as published
+// by the Free Software Foundation, either version 3 of the License, or (at
+// your option) any later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program. (It's in the $(ROOT)/doc directory, run make with no
+// target there if the PDF file isn't present.) If not, see
+// <http://www.gnu.org/licenses/> for a copy.
+//
+// License: GPL, v3, as defined and found on www.gnu.org,
+// http://www.gnu.org/licenses/gpl.html
+//
+//
+////////////////////////////////////////////////////////////////////////////////
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "predict.h"
+#include "basic-block.h"
+#include "bitmap.h"
+#include "df.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "symtab.h"
+#include "rtlhash.h"
+#include "tree.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "function.h"
+#include "recog.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "builtins.h"
+#include "calls.h"
+#include "langhooks.h"
+#include "optabs.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "ifcvt.h"
+#include "genrtl.h"
+
+// #include "tmp_p.h"
+#include "target.h"
+#include "target-def.h"
+// #include "tm-constrs.h"
+#include "tm-preds.h"
+
+#include "diagnostic.h"
+// #include "integrate.h"
+
+#include "zip-protos.h"
+
+static bool zip_return_in_memory(const_tree, const_tree);
+static bool zip_frame_pointer_required(void);
+
+static void zip_function_arg_advance(cumulative_args_t ca, enum machine_mode mode,
+ const_tree type, bool named);
+static rtx zip_function_arg(cumulative_args_t ca, enum machine_mode mode, const_tree type, bool named);
+
+static void zip_asm_trampoline_template(FILE *);
+static void zip_trampoline_init(rtx, tree, rtx);
+static void zip_init_builtins(void);
+static tree zip_builtin_decl(unsigned, bool);
+// static void zip_asm_output_anchor(rtx x);
+ void zip_asm_output_def(FILE *s, const char *n, const char *v);
+static rtx zip_expand_builtin(tree exp, rtx target, rtx subtarget,
+ enum machine_mode tmode, int ignore);
+static bool zip_scalar_mode_supported_p(enum machine_mode mode);
+static bool zip_libgcc_floating_mode_supported_p(enum machine_mode mode);
+static int zip_address_cost(rtx addr, enum machine_mode mode, addr_space_t as, bool spd);
+static bool zip_mode_dependent_address_p(const_rtx addr, addr_space_t);
+static unsigned HOST_WIDE_INT zip_const_anchor = 0x20000;
+static HOST_WIDE_INT zip_min_opb_imm = -0x20000;
+static HOST_WIDE_INT zip_max_opb_imm = 0x1ffff;
+static HOST_WIDE_INT zip_min_anchor_offset = -0x2000;
+static HOST_WIDE_INT zip_max_anchor_offset = 0x1fff;
+static HOST_WIDE_INT zip_min_mov_offset = -0x1000;
+static HOST_WIDE_INT zip_max_mov_offset = 0x0fff;
+static int zip_sched_issue_rate(void) { return 1; }
+static bool zip_legitimate_address_p(machine_mode, rtx, bool);
+static bool zip_legitimate_move_operand_p(machine_mode, rtx, bool);
+ void zip_debug_rtx_pfx(const char *, const_rtx x);
+ void zip_debug_rtx(const_rtx x);
+static void zip_override_options(void);
+static bool zip_can_eliminate(int from ATTRIBUTE_UNUSED, int to);
+static int zip_memory_move_cost(machine_mode, reg_class_t, bool);
+static rtx zip_legitimize_address(rtx x, rtx oldx, machine_mode mode);
+static bool zip_cannot_modify_jumps_p(void);
+static bool zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b);
+
+
+#define ZIP_ALL_DEBUG_OFF false
+#define ZIP_ALL_DEBUG_ON false
+#define ZIPDEBUGFLAG(A,B) const bool A = \
+ ((ZIP_ALL_DEBUG_ON)||(B))&&(!ZIP_ALL_DEBUG_OFF)
+
+enum ZIP_BUILTIN_ID_CODE {
+ ZIP_BUILTIN_RTU,
+ ZIP_BUILTIN_HALT,
+ ZIP_BUILTIN_IDLE,
+ ZIP_BUILTIN_SYSCALL,
+ ZIP_BUILTIN_SAVE_CONTEXT,
+ ZIP_BUILTIN_RESTORE_CONTEXT,
+ ZIP_BUILTIN_BITREV,
+ ZIP_BUILTIN_CC,
+ ZIP_BUILTIN_UCC,
+ ZIP_BUILTIN_BUSY,
+ ZIP_BUILTIN_MAX
+};
+
+static GTY (()) tree zip_builtins[(int)ZIP_BUILTIN_MAX];
+static enum insn_code zip_builtins_icode[(int)ZIP_BUILTIN_MAX];
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#undef TARGET_ASM_ALIGNED_SI_OP
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.short\t"
+#define TARGET_ASM_ALIGNED_SI_OP "\t.int\t"
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
+
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
+
+#include "gt-zip.h"
+
+/* The Global 'targetm' Variable. */
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+
+enum reg_class zip_reg_class(int);
+
+#define LOSE_AND_RETURN(msgid, x) \
+ do { \
+ zip_operand_lossage(msgid, x); \
+ return; \
+ } while(0)
+
+/* Per-function machine data. */
+struct GTY(()) machine_function
+{
+ /* number of pretended arguments for varargs */
+ int pretend_size;
+
+ /* Number of bytes saved on the stack for local variables. */
+ int local_vars_size;
+
+ /* Number of bytes saved on stack for register save area */
+ int saved_reg_size;
+ int save_ret;
+
+ int sp_fp_offset;
+ bool fp_needed;
+ int size_for_adjusting_sp;
+};
+
+/* Allocate a chunk of memory for per-function machine-dependent data. */
+
+static struct machine_function *
+zip_init_machine_status(void) {
+ return ggc_cleared_alloc<machine_function>();
+}
+
+static void
+zip_override_options(void)
+{
+ init_machine_status = zip_init_machine_status;
+}
+
+enum reg_class
+zip_reg_class(int regno)
+{
+ if (is_ZIP_GENERAL_REG(regno)) {
+ return GENERAL_REGS;
+ } else if (is_ZIP_REG(regno)) {
+ return ALL_REGS;
+ } return NO_REGS;
+}
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+static bool
+zip_return_in_memory(const_tree type, const_tree fntype ATTRIBUTE_UNUSED) {
+ const HOST_WIDE_INT size = int_size_in_bytes(type);
+ return (size == -1)||(size > 2*UNITS_PER_WORD);
+}
+
+/* Emit an error message when we're in an asm, and a fatal error for "normal"
+ * insns. Formatted output isn't easily implemented, since we use
+ * output_operand_lossage to output the actual message and handle the
+ * categorization of the error. */
+
+static void
+zip_operand_lossage(const char *msgid, rtx op) {
+ debug_rtx(op);
+ zip_debug_rtx(op);
+ output_operand_lossage("%s", msgid);
+}
+
+/* The PRINT_OPERAND_ADDRESS worker. */
+void
+zip_print_operand_address(FILE *file, rtx x) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) zip_debug_rtx(x);
+ switch(GET_CODE(x)) {
+ case REG:
+ gcc_assert(is_ZIP_REG(REGNO(x)));
+ gcc_assert(REGNO(x) < 16);
+ fprintf(file, "(%s)", reg_names[REGNO(x)]);
+ break;
+ case SYMBOL_REF:
+ fprintf(file, "%s", XSTR(x,0));
+ break;
+ case LABEL_REF:
+ x = LABEL_REF_LABEL(x);
+ case CODE_LABEL:
+ { char buf[256];
+ ASM_GENERATE_INTERNAL_LABEL(buf, "L", CODE_LABEL_NUMBER(x));
+#ifdef ASM_OUTPUT_LABEL_REF
+ ASM_OUTPUT_LABEL_REF(file, buf);
+#else
+ assemble_name(file, buf);
+#endif
+ }
+ break;
+ case PLUS:
+ if (!REG_P(XEXP(x, 0))) {
+ fprintf(stderr, "Unsupported address construct\n");
+ zip_debug_rtx(x);
+ abort();
+ } gcc_assert(is_ZIP_REG(REGNO(XEXP(x,0))));
+ gcc_assert(REGNO(XEXP(x,0))<16);
+ if (CONST_INT_P(XEXP(x, 1))) {
+ if (INTVAL(XEXP(x,1))!=0) {
+ fprintf(file, "%ld(%s)",
+ (long)INTVAL(XEXP(x, 1)),
+ reg_names[REGNO(XEXP(x, 0))]);
+ } else {
+ fprintf(file, "(%s)",
+ reg_names[REGNO(XEXP(x, 0))]);
+ }
+ } else if (GET_CODE(XEXP(x,1)) == SYMBOL_REF) {
+ fprintf(file, "%s(%s)", XSTR(x,0),
+ reg_names[REGNO(XEXP(x, 0))]);
+ } else if ((GET_CODE(XEXP(x, 1)) == MINUS)
+ && (GET_CODE(XEXP(XEXP(x, 1), 0))==SYMBOL_REF)
+ && (GET_CODE(XEXP(XEXP(x, 1), 1))==SYMBOL_REF)) {
+ fprintf(file, "%s-%s(%s)",
+ XSTR(XEXP(XEXP(x, 1),0),0),
+ XSTR(XEXP(XEXP(x, 1),1),0),
+ reg_names[REGNO(XEXP(x, 0))]);
+ } else
+ fprintf(file, "#INVALID(%s)",
+ reg_names[REGNO(XEXP(x, 0))]);
+ /*
+ else if (GET_CODE(XEXP(addr, 1)) == LABEL)
+ fprintf(file, "%s(%s)",
+ GET_CODE(XEXP(addr, 1)),
+ reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+ else if ((GET_CODE(XEXP(addr, 1)) == MINUS)
+ && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 0))==LABEL)
+ && (GET_CODE(XEXP(GET_CODE(XEXP(addr, 1)), 1))==LABEL)) {
+ fprintf(file, "%s-%s(%s)",
+ reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+ reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+ reg_names[REGNO(GET_CODE(XEXP(addr, 0)))]);
+ }
+ */
+ break;
+ // We don't support direct memory addressing within our
+ // instruction set, even though the instructions themselves
+ // would support direct memory addressing of the lower 18 bits
+ // of memory space.
+ case MEM:
+ if (dbg) zip_debug_rtx(x);
+ zip_print_operand_address(file, XEXP(x, 0));
+ break;
+ case CONST_INT:
+ fprintf(file, "%ld",(long)INTVAL(x));
+ break;
+ default:
+ fprintf(stderr, "Unknown address format\n");
+ zip_debug_rtx(x);
+ abort(); break;
+ // output_addr_const(file, x);
+ break;
+ }
+}
+
+/* The PRINT_OPERAND worker. */
+
+void
+zip_print_operand(FILE *file, rtx x, int code)
+{
+ rtx operand = x;
+ int rgoff = 0;
+
+ // fprintf(file, "Print Operand!\n");
+
+ /* New code entries should just be added to the switch below. If
+ * handling is finished, just return. If handling was just a
+ * modification of the operand, the modified operand should be put in
+ * "operand", and then do a break to let default handling
+ * (zero-modifier) output the operand.
+ */
+ switch(code) {
+ case 0:
+ /* No code, print as usual. */
+ break;
+ case 'L':
+ /* Lower of two registers, print one up */
+ rgoff = 1;
+ break;
+ case 'R':
+ case 'H':
+ /* Higher of a register pair, print normal */
+ break;
+
+ default:
+ LOSE_AND_RETURN("invalid operand modifier letter", x);
+ }
+
+ /* Print an operand as without a modifier letter. */
+ switch (GET_CODE(operand)) {
+ case REG:
+ if (REGNO(operand)+rgoff >= FIRST_PSEUDO_REGISTER)
+ internal_error("internal error: bad register: %d", REGNO(operand));
+ fprintf(file, "%s", reg_names[REGNO(operand)+rgoff]);
+ return;
+ case SCRATCH:
+ LOSE_AND_RETURN("Need a scratch register", x);
+ return;
+
+ case CODE_LABEL:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case PLUS:
+ PRINT_OPERAND_ADDRESS(file, operand);
+ return;
+ case MEM:
+ PRINT_OPERAND_ADDRESS(file, XEXP(operand, 0));
+ return;
+
+ default:
+ /* No need to handle all strange variants, let
+ * output_addr_const do it for us.
+ */
+ if (CONSTANT_P(operand)) {
+ output_addr_const(file, operand);
+ return;
+ }
+
+ zip_debug_rtx(x);
+ LOSE_AND_RETURN("unexpected operand", x);
+ }
+}
+
+static bool
+zip_frame_pointer_required(void)
+{
+ // This should really depend upon whether we have variable sized
+ // arguments in our frame or not. Once this fails, let's look
+ // at what the problem was and then whether or not we can detect
+ // it.
+ //
+ // Use a GCC global to determine our answer
+ if (cfun->calls_alloca)
+ return true;
+
+ // If the stack frame is too large to access saved registers with
+ // immediate offsets, then we *must* use a frame pointer
+ unsigned stack_size = 36;
+ stack_size += (ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+
+ //
+ // if cfun->machine->size_for_adjusting_sp might ever be larger than
+ // zip_max_anchor_offset, then we MUST have a frame pointer.
+ //
+ // cfun->machine->size_for_adjusting_sp =
+ // get_frame_size
+ // + saved_reg_size (will always be <= 36)
+ // + outgoing_args_size;
+ // + pretend_args_size;
+
+ if(crtl->args.pretend_args_size > 0)
+ stack_size += crtl->args.pretend_args_size;
+ stack_size += get_frame_size();
+ // Align our attempted stack size
+ stack_size = ((stack_size+3)&-4);
+
+ // Now here's our test
+ if (stack_size >= zip_max_anchor_offset)
+ return true;
+ return (frame_pointer_needed);
+/*
+*/
+}
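+// Illustrative example: with 12288 (0x3000) bytes of local variables, the
+// estimated stack size (36 plus locals, rounded up to a multiple of four)
+// is well above zip_max_anchor_offset (0x1fff), so the saved registers
+// could not be reached with an immediate offset and a frame pointer is
+// required.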
+
+/* Determine whether or not a register needs to be saved on the stack.
+ */
+static bool
+zip_save_reg(int regno) {
+ if (regno == 0)
+ return ((!crtl->is_leaf)
+ ||((df_regs_ever_live_p(0))&&(!call_used_regs[0])));
+ else if ((regno == zip_GOT)&&(!ZIP_PIC))
+ return ((df_regs_ever_live_p(regno))
+ &&(!call_used_regs[regno]));
+ else if (regno == zip_FP)
+ return((zip_frame_pointer_required())||((df_regs_ever_live_p(regno))
+ &&(!call_used_regs[regno])));
+ else if (regno < zip_FP)
+ return ((df_regs_ever_live_p(regno))
+ &&(!call_used_regs[regno]));
+ return false;
+}
+
+/* Compute the size of the local area and the size to be adjusted by the
+ * prologue and epilogue.
+ *
+ * Here's what we are looking at (top is the current, bottom is the last ...)
+ *
+ * Stack Pointer ->
+ * Outgoing arguments
+ * Local variables (could be variable size)
+ * Frame Pointer -> (= Stack Pointer + sp_fp_offset)
+ * Saved return address, if saved
+ * Other Saved registers
+ * Saved frame pointer (if used)
+ * Saved R12, if used
+ * (Stack pointer is not saved)
+ * (PRETEND-ARGS)
+ * Original stack pointer -> (= Stack_Pointer +size_for_adjusting_sp)
+ * Called arguments (not passed in registers)
+ * Return arguments (not R1, args.pretend_args_size)
+ * (Prior function's stack frame ... )
+ *
+ */
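+// Worked example (illustrative numbers only): with 8 bytes of outgoing
+// arguments, 12 bytes of (aligned) locals, two 4-byte callee-saved
+// registers, and no pretend args, sp_fp_offset = 8+12 = 20 and
+// size_for_adjusting_sp = 12+8+8 = 28; the prologue then subtracts 28
+// from SP and the saved registers land at 20(SP) and 24(SP).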
+static void
+zip_compute_frame(void) {
+ int regno;
+ int args_size;
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP-COMPUTE-FRAME: %s\n", current_function_name());
+ // gcc_assert(crtl);
+ gcc_assert(cfun);
+ gcc_assert(cfun->machine);
+
+ args_size=(ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+
+ if(crtl->args.pretend_args_size > 0) {
+ args_size += crtl->args.pretend_args_size;
+ if (dbg) fprintf(stderr, "%s pretend_args_size : %d\n", current_function_name(),
+ crtl->args.pretend_args_size);
+ cfun->machine->pretend_size = crtl->args.pretend_args_size;
+ }
+
+ cfun->machine->local_vars_size = get_frame_size();
+
+ // Force frame alignment of the local variable section
+ cfun->machine->local_vars_size += 3;
+ cfun->machine->local_vars_size &= -4;
+
+ // Save callee-saved registers.
+ cfun->machine->saved_reg_size = 0;
+ for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+ if (zip_save_reg(regno))
+ cfun->machine->saved_reg_size += 4;
+ }
+
+ cfun->machine->fp_needed = (zip_frame_pointer_required());
+
+ if ((cfun->machine->fp_needed)&&
+ (!df_regs_ever_live_p(zip_FP))) {
+ cfun->machine->saved_reg_size += 4;
+ }
+
+ cfun->machine->sp_fp_offset = crtl->outgoing_args_size
+ + cfun->machine->local_vars_size;
+ cfun->machine->size_for_adjusting_sp = cfun->machine->local_vars_size
+ + cfun->machine->saved_reg_size
+ + args_size;
+ if(dbg) {
+ fprintf(stderr, "\t---- STACK PTR ----\n");
+ fprintf(stderr, "\tOUTGOIN-SIZE: %d\n",
+ crtl->outgoing_args_size);
+ fprintf(stderr, "\tLOCALS-SIZE : %d\n",
+ cfun->machine->local_vars_size);
+ fprintf(stderr, "\t---- FRAME PTR ----%s\n",
+ cfun->machine->fp_needed?"":" (Eliminated)");
+ fprintf(stderr, "\tREGISTERS : %d\n",
+ cfun->machine->saved_reg_size);
+ fprintf(stderr, "\tPRETEND SIZE: %d\n",
+ crtl->args.pretend_args_size);
+ fprintf(stderr, "\t---- ARG PTR (Original SP, should be eliminated) ----\n");
+ fprintf(stderr, "\t----\n");
+ fprintf(stderr, "\tARGS-SIZE : %d\n", args_size);
+ fprintf(stderr, "\tSP_FP_OFFSET: %d\n",
+ cfun->machine->sp_fp_offset);
+ fprintf(stderr, "\tSP-ADJUSTMNT: %d\n",
+ cfun->machine->size_for_adjusting_sp);
+ }
+}
+
+void
+zip_save_registers(rtx basereg_rtx, int sp_offset_to_first_register) {
+ rtx insn;
+ ZIPDEBUGFLAG(dbg, false);
+
+ // Compute Frame has already been calculated before coming into here
+ //
+ // zip_compute_frame();
+ if (dbg) fprintf(stderr, "PROLOGUE::SAVE-REGISTER\n");
+
+ int offset = 0, regno;
+ for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+ if (zip_save_reg(regno)) {
+ if (dbg) fprintf(stderr,
+ "PROLOGUE::SAVE-REGISTER Saving R%d in %d+%d(SP)\n",
+ regno, sp_offset_to_first_register, offset);
+ insn=emit_insn(gen_movsi_sto_off(
+ basereg_rtx,
+ GEN_INT(sp_offset_to_first_register +offset),
+ gen_rtx_REG(SImode, regno)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ offset += 4;
+ }
+ } if (dbg) fprintf(stderr, "%d registers saved%s\n", offset,
+ (crtl->saves_all_registers)?", should be all of them":", less than all");
+
+}
+
+/*
+ * zip_expand_small_prologue()
+ *
+ * To be used when size_for_adjusting_sp fits within zip_max_anchor_offset.
+ *
+ *
+ * Approach:
+ * SUB size_for_adjusting_sp,SP
+ * SW REG,0(SP)
+ * SW REG,4(SP)
+ * SW REG,8(SP)
+ * ....
+ * SW REG,#(SP)
+ *
+ * and if we need a frame register, we'll either do ...
+ * MOV sp_fp_offset+SP,FP
+ * or if the offset is too large, we'll do ...
+ * MOV SP,FP
+ * ADD sp_fp_offset,FP
+ *
+ */
+void
+zip_expand_small_prologue(void) {
+ ZIPDEBUGFLAG(dbg, false);
+ rtx insn;
+
+ zip_compute_frame();
+
+ if (dbg) fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(SP-FP offset is %d)\n",
+ cfun->machine->sp_fp_offset);
+
+ insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+ gen_int_mode(cfun->machine->size_for_adjusting_sp,
+ SImode)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+
+ zip_save_registers(stack_pointer_rtx, cfun->machine->sp_fp_offset);
+
+ if (cfun->machine->fp_needed) {
+ if (dbg) fprintf(stderr, "PROLOGUE:::EXPAND-SMALL-PROLOGUE(FP-NEEDED)\n");
+ if (dbg) zip_debug_rtx(stack_pointer_rtx);
+ if (dbg) zip_debug_rtx(frame_pointer_rtx);
+ if (cfun->machine->sp_fp_offset < zip_max_mov_offset) {
+ if (dbg) fprintf(stderr,
+ "PROLOGUE:::EXPAND-SMALL-PROLOGUE() "
+ "gen_movsi_reg_off(FP, SP, %d), %d < %ld\n",
+ cfun->machine->sp_fp_offset,
+ cfun->machine->sp_fp_offset,
+ zip_max_mov_offset);
+ insn = emit_insn(gen_movsi_reg_off(frame_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT(cfun->machine->sp_fp_offset)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ } else {
+ rtx fp_rtx;
+
+ fp_rtx = gen_rtx_REG(SImode, zip_FP);
+
+ insn = emit_insn(gen_movsi(fp_rtx, stack_pointer_rtx));
+ RTX_FRAME_RELATED_P(insn) = 1;
+
+ insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx,
+ GEN_INT(cfun->machine->sp_fp_offset)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ }
+ }
+}
+
+/*
+ * zip_expand_large_prologue()
+ *
+ * The prologue function will be called when the size_for_adjusting_sp is too
+ * large to fit into a single OPB-immediate as part of a subtract.
+ *
+ * Approach:
+ * SUB (size_for_adjusting_sp-sp_fp_offset),SP
+ * SW R0,(SP)
+ * SW R5,4(SP)
+ * SW R6,8(SP)
+ * SW R7,12(SP)
+ * ...
+ * SW FP,(SP)
+ *
+ * LDI sp_fp_offset,FP
+ * SUB FP,SP
+ * ADD SP,FP
+ */
+void
+zip_expand_large_prologue(void) {
+ ZIPDEBUGFLAG(dbg, false);
+ rtx insn, fp_rtx;
+
+ gcc_assert(cfun->machine->fp_needed);
+
+ if (dbg) fprintf(stderr, "PROLOGUE::expand-large(%d-%d)\n",
+ cfun->machine->size_for_adjusting_sp,
+ cfun->machine->sp_fp_offset);
+ insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+ gen_int_mode(cfun->machine->size_for_adjusting_sp
+ -cfun->machine->sp_fp_offset, SImode)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+
+ zip_save_registers(stack_pointer_rtx, 0);
+
+ fp_rtx = gen_rtx_REG(SImode, zip_FP);
+
+ insn = emit_insn(gen_movsi(fp_rtx,
+ gen_int_mode(cfun->machine->sp_fp_offset, SImode)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+
+ insn = emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx,
+ fp_rtx));
+ RTX_FRAME_RELATED_P(insn) = 1;
+
+ insn = emit_insn(gen_addsi3(fp_rtx, fp_rtx, stack_pointer_rtx));
+ RTX_FRAME_RELATED_P(insn) = 1;
+}
+
+void
+zip_expand_prologue(void) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ zip_compute_frame();
+
+ if (dbg) fprintf(stderr, "PROLOGUE: Computing Prologue instructions\n");
+ if (dbg) fprintf(stderr, "PROLOGUE: SP-FP offset is %d\n",
+ cfun->machine->sp_fp_offset);
+ if (cfun->machine->size_for_adjusting_sp != 0) {
+ if (cfun->machine->size_for_adjusting_sp <= zip_max_anchor_offset) {
+ if (dbg) fprintf(stderr, "PROLOGUE: "
+ "%d <= %ld, so going small\n",
+ cfun->machine->size_for_adjusting_sp,
+ zip_max_anchor_offset);
+ zip_expand_small_prologue();
+ } else {
+ zip_expand_large_prologue();
+ }
+ }
+}
+
+int
+zip_use_return_insn(void)
+{
+ if ((!reload_completed)||(cfun->machine->fp_needed)
+ ||(get_frame_size()!=0)) {
+ // If R0 ever gets pushed to the stack, then we cannot
+ // use a master return from anywhere. We need to clean up the
+ // stack first.
+ if ((!crtl->is_leaf)||((df_regs_ever_live_p(0))
+ &&(!call_used_regs[0]))) {
+ return 0;
+ }
+ }
+ zip_compute_frame();
+ return (cfun->machine->size_for_adjusting_sp == 0)?1:0;
+}
+
+/* As per the notes in M68k.c (to quote): the function epilogue should not depend
+ * upon the current stack pointer. It should use the frame pointer only,
+ * if there is a frame pointer. This is mandatory because of alloca; we also
+ * take advantage of it to omit stack adjustments before returning ...
+ *
+ * Let's see if we can use their approach here.
+ *
+ * We can't. Consider our choices:
+ * LW (FP),R0
+ * LW 4(FP),R4
+ * LW 8(FP),R5
+ * LW 12(FP),R6
+ * LW 16(FP),FP
+ * ... Then what is the stack pointer?
+ * or
+ * LW (FP),R0
+ * LW 4(FP),R4
+ * LW 8(FP),R5
+ * LW 12(FP),R6
+ * MOV FP,SP
+ * LW 16(SP),FP
+ * ... Which suffers unnecessary pipeline stalls, and certainly doesn't
+ * exploit our pipeline memory function
+ * or
+ * MOV FP,SP
+ * LW (SP),R0
+ * LW 4(SP),R4
+ * LW 8(SP),R5
+ * LW 12(SP),R6
+ * LW 16(SP),FP
+ * Which will be our choice. Note that we do use the stack pointer, eventually.
+ *
+ */
+void
+zip_expand_epilogue(void) {
+ int regno, offset;
+ ZIPDEBUGFLAG(dbg, false);
+ rtx insn;
+
+ zip_compute_frame();
+
+ if (dbg) fprintf(stderr, "EPILOG::\n");
+ if (cfun->machine->fp_needed) {
+ // This is done special--if you can't trust the stack pointer
+ // enough so that you must have a frame pointer, then you can't
+ // trust its offset enough to restore from it. Hence, we start
+ // by moving the frame pointer to the stack pointer to recover
+ // the stack pointer back to a usable value.
+ if (dbg) fprintf(stderr, "EPILOG::Moving frame pointer to stack register\n");
+ insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ }
+
+ if (cfun->machine->saved_reg_size != 0) {
+ if (cfun->machine->fp_needed)
+ offset = 0;
+ else
+ offset = cfun->machine->sp_fp_offset;
+ if (dbg) fprintf(stderr, "EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+ for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+ if (zip_save_reg(regno)) {
+ if (dbg) fprintf(stderr, "EPILOG::RESTORING R%d from SP+%d\n", regno, offset);
+ rtx reg = gen_rtx_REG(SImode, regno);
+ insn = emit_insn(gen_movsi_lod_off(
+ reg,
+ stack_pointer_rtx,
+ GEN_INT(offset)));
+ add_reg_note(insn, REG_CFA_RESTORE, reg);
+ RTX_FRAME_RELATED_P(insn) = 1;
+ offset += 4;
+ }
+ }
+ }
+
+ if (cfun->machine->fp_needed) {
+ // Restore the stack pointer back to the original, the
+ // difference being the difference from the frame pointer
+ // to the original stack
+ insn = emit_insn(gen_addsi3(stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT(cfun->machine->size_for_adjusting_sp
+ -cfun->machine->sp_fp_offset)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ } else {
+ // else now the difference is between the stack pointer and
+ // the original stack pointer.
+ if (dbg) fprintf(stderr, "EPILOG::ADDSI3(StackPtr, %d)\n",
+ cfun->machine->size_for_adjusting_sp);
+ insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT(cfun->machine->size_for_adjusting_sp)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ }
+ if (dbg) fprintf(stderr, "EPILOG::EMITTING-RETURN\n");
+
+ // The return RTX is not allowed to be frame related
+ insn = emit_jump_insn(ret_rtx);
+ // RTX_FRAME_RELATED_P(insn) = 1;
+}
+
+void
+zip_sibcall_epilogue(void) {
+ int regno, offset;
+ ZIPDEBUGFLAG(dbg, false);
+ rtx insn;
+
+ zip_compute_frame();
+
+ if (dbg) fprintf(stderr, "EPILOG::\n");
+ if (cfun->machine->fp_needed) {
+ // This is done special--if you can't trust the stack pointer
+ // enough so that you must have a frame pointer, then you can't
+ // trust its offset enough to restore from it. Hence, we start
+ // by moving the frame pointer to the stack pointer to recover
+ // the stack pointer back to a usable value.
+ if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Moving frame pointer to stack register\n");
+ insn = emit_insn(gen_movsi_raw(stack_pointer_rtx, frame_pointer_rtx));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ }
+
+ if (cfun->machine->saved_reg_size != 0) {
+ if (cfun->machine->fp_needed)
+ offset = 0;
+ else
+ offset = cfun->machine->sp_fp_offset;
+ if (dbg) fprintf(stderr, "SIBCALL-EPILOG::Saved_REG_Size = %d\n", cfun->machine->saved_reg_size);
+ for(regno=0; regno < FIRST_PSEUDO_REGISTER; regno++) {
+ if (zip_save_reg(regno)) {
+ if (dbg) fprintf(stderr, "SIBCALL-EPILOG::RESTORING R%d\n", regno);
+ rtx reg = gen_rtx_REG(SImode, regno);
+ insn = emit_insn(gen_movsi_lod_off(
+ reg,
+ stack_pointer_rtx,
+ GEN_INT(offset)));
+ add_reg_note(insn, REG_CFA_RESTORE, reg);
+ RTX_FRAME_RELATED_P(insn) = 1;
+ offset += 4;
+ }
+ }
+ }
+
+ if (cfun->machine->fp_needed) {
+ // Restore the stack pointer back to the original, the
+ // difference being the difference from the frame pointer
+ // to the original stack
+ insn = emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT(cfun->machine->size_for_adjusting_sp
+ -cfun->machine->sp_fp_offset)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ } else {
+ // else now the difference is between the stack pointer and
+ // the original stack pointer.
+ if (dbg) fprintf(stderr, "SIBCALL-EPILOG::ADDSI3(StackPtr, %d)\n",
+ cfun->machine->size_for_adjusting_sp);
+ insn = emit_insn(gen_addsi3(stack_pointer_rtx,stack_pointer_rtx,
+ GEN_INT(cfun->machine->size_for_adjusting_sp)));
+ RTX_FRAME_RELATED_P(insn) = 1;
+ }
+}
+
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+ //
+ // Don't try to compute anything other than frame zero.
+ //
+ if (count != 0)
+ return NULL_RTX;
+
+ // Make sure we've computed our frame, do we need to save registers?
+ zip_compute_frame();
+
+ if (zip_save_reg(zip_LR)) {
+ if (cfun->machine->fp_needed)
+ return gen_rtx_MEM(SImode, frame_pointer_rtx);
+ else
+ return gen_rtx_MEM(SImode, gen_rtx_PLUS(Pmode,
+ stack_pointer_rtx,
+ GEN_INT(cfun->machine->sp_fp_offset)));
+ } else {
+ return gen_rtx_REG(Pmode, zip_LR);
+
+ }
+}
+
+/* Implement RETURN_ADDR_RTX(COUNT, FRAMEADDR).
+ *
+ * We currently only support calculating the return address for the current
+ * frame.
+ */
+
+/*
+rtx
+zip_return_addr_rtx(int count, rtx frame ATTRIBUTE_UNUSED)
+{
+ if (count)
+ return NULL_RTX;
+
+ zip_compute_frame();
+
+ // saved return address for current function is at fp - 1
+ if (cfun->machine->save_ret)
+ return gen_rtx_MEM(Pmode, plus_constant(frame_pointer_rtx,
+ -UNITS_PER_WORD));
+ return get_hard_reg_initial_val(Pmode, RETURN_ADDRESS_REGNUM);
+}
+*/
+
+/* Implements the macro INITIAL_ELIMINATION_OFFSET,
+ * return the OFFSET.
+ */
+int
+zip_initial_elimination_offset(int from, int to) {
+ int ret = 0;
+ zip_compute_frame();
+
+/*
+ if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+ ret = cfun->machine->sp_fp_offset;
+ } else if (((from)=ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+ // Since the ARG_POINTER_REGNUM is defined to be identical
+ // to the FRAME_POINTER_REGNUM, this "if" will never ever
+ // get called.
+ ret = cfun->machine->sp_fp_offset;
+ } else if (((from)=ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+ // Since we define ARG_POINTER_REGNUM to be FRAME_POINTER_REGNUM
+ // we're asked for the offset between the frame pointer and
+ // itself. The result had better be zero.
+ //
+ ret = 0;
+ } else {
+ abort();
+ }
+*/
+
+ // Let's try using an ARG_POINTER != FRAME_POINTER
+ if (((from) == FRAME_POINTER_REGNUM)&&((to) == STACK_POINTER_REGNUM)) {
+ ret = cfun->machine->sp_fp_offset;
+ } else if (((from)==ARG_POINTER_REGNUM)&&((to)==STACK_POINTER_REGNUM)) {
+ // Since the ARG_POINTER_REGNUM is defined to be identical
+ // to the FRAME_POINTER_REGNUM, this "if" will never ever
+ // get called.
+ ret = cfun->machine->size_for_adjusting_sp;
+ } else if (((from)==ARG_POINTER_REGNUM)&&((to)==FRAME_POINTER_REGNUM)) {
+ ret = cfun->machine->size_for_adjusting_sp
+ - cfun->machine->sp_fp_offset;
+ } else {
+ abort();
+ }
+
+ return ret;
+}
+
+/*
+ * Code taken from m68k ...
+ */
+static bool
+zip_can_eliminate(int from, int to)
+{
+ // fprintf(stderr, "CAN_ELIMINATE::QUERYING(%d,%d)\n", from, to);
+ if ((from == zip_FP)&&(to == zip_SP))
+ return !cfun->machine->fp_needed;
+ return true;
+}
+
+/* Compute the number of word sized registers needed to hold a function
+ * argument of mode INT_MODE and tree type TYPE.
+ */
+int
+zip_num_arg_regs(enum machine_mode mode, const_tree type) {
+ int size;
+
+ if (targetm.calls.must_pass_in_stack(mode, type))
+ return 0;
+
+ if ((type)&&(mode == BLKmode))
+ size = int_size_in_bytes(type);
+ else
+ size = GET_MODE_SIZE(mode);
+
+ return (size + UNITS_PER_WORD - 1)/UNITS_PER_WORD;
+}
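+// Illustrative example: with 4-byte words (UNITS_PER_WORD == 4), a 6-byte
+// BLKmode argument needs (6+4-1)/4 = 2 argument registers, while anything
+// that must_pass_in_stack() reports as stack-only needs none.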
+
+static void
+zip_function_arg_advance(cumulative_args_t ca, machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED) {
+ CUMULATIVE_ARGS *cum;
+ int nreg;
+
+ cum = get_cumulative_args(ca);
+ nreg = zip_num_arg_regs(mode, type);
+ if (((*cum)+nreg) > NUM_ARG_REGS)
+ (*cum) = NUM_ARG_REGS;
+ else
+ (*cum) += nreg;
+}
+
+static rtx
+zip_function_arg(cumulative_args_t ca, machine_mode mode,
+ const_tree type ATTRIBUTE_UNUSED, bool named) {
+ CUMULATIVE_ARGS *cum;
+
+ if (!named)
+ return NULL_RTX;
+ cum = get_cumulative_args(ca);
+
+ if ((*cum) >= NUM_ARG_REGS)
+ return NULL_RTX;
+ return
+ gen_rtx_REG(mode, (*cum)+1);
+}
+
+/* DECL is the declaration of the function being targeted by the call, and EXP
+ * is the CALL_EXPR representing the call.
+ */
+bool zip_function_ok_for_sibcall(ATTRIBUTE_UNUSED tree decl, tree exp) {
+ // calls.c already checks whether or not the parameter stack space
+ // is identical, so ... let's hope this all works and find out.
+
+ //
+	// Actually, this will fail: if the sibling call passes an argument in
+	// R5 and we don't, then there will be no way to restore R5. This is
+ // true for the current configuration. It will be true for future
+ // configurations if the sibling ever uses a register that must be
+ // saved as a parameter register.
+ //
+ // We can check this ... if we can count how many registers the
+ // sibling call will use.
+ //
+ CUMULATIVE_ARGS cum_v;
+ cumulative_args_t cum;
+ tree parameter;
+ machine_mode mode;
+ tree ttype;
+ rtx parm_rtx;
+ int i;
+ static const char zip_call_used_register[] = CALL_USED_REGISTERS;
+
+ INIT_CUMULATIVE_ARGS(cum_v, NULL, NULL, 0,0);
+ cum = pack_cumulative_args(&cum_v);
+ for (i=0; i<call_expr_nargs(exp); i++) {
+
+ parameter = CALL_EXPR_ARG(exp, i);
+
+ if ((!parameter) || (TREE_CODE(parameter)==ERROR_MARK))
+ return true;
+ ttype = TREE_TYPE(parameter);
+ gcc_assert(ttype);
+		mode = TYPE_MODE(ttype);
+
+ if (pass_by_reference(&cum_v, mode, ttype, true)) {
+ mode = Pmode;
+ ttype = build_pointer_type(ttype);
+ }
+
+ parm_rtx = zip_function_arg(cum, mode, ttype, 0);
+ zip_function_arg_advance(cum, mode, ttype, 0);
+ if (!parm_rtx)
+ continue;
+
+ // If it is a register
+ // and it is *NOT* a CALL_USED_REGISTER
+ // then we can't do this.
+ //
+ // Example: func(R1,..R4,R5)
+ // can be followed by func2(R1,.., up to R5)
+ // (not supported, though... just to simplify our test
+ // below)
+ // Example: func(R1,..R4)
+ // cannot be followed by func2(R1,..,R5)
+ // We would blow R5 away by our prologue, even if it was
+ // properly set.
+ // Example: func(R1,..R5)
+ // can be followed by func2(R1,.., up to R4)
+ // func2 may save R5 (which doesn't need saving) but that's
+ // irrelevant
+ // Example: func(R1,..up to R4)
+ // can be followed by func2(R1,.., up to R4)
+ //
+ if (REG_P(parm_rtx)&&(REGNO(parm_rtx))
+ &&(REGNO(parm_rtx)<sizeof(zip_call_used_register))
+ &&(!zip_call_used_register[REGNO(parm_rtx)]))
+ return false;
+ }
+
+ return true;
+
+ // We also need to check if the return types are the same ... or
+ // will GCC handle that for us?
+}
+
+void zip_canonicalize_comparison(int *code, rtx *op0, rtx *op1,
+ bool preserve_op0)
+{
+ ZIPDEBUGFLAG(dbg, false);
+ bool reverse = false;
+
+ if (dbg) fprintf(stderr, "CANONICALIZE ...%s\n", (preserve_op0)?"(Preserve Op0)":"");
+ if (dbg) zip_debug_rtx_pfx("CODE", gen_rtx_fmt_ee((rtx_code)*code, VOIDmode, gen_rtx_REG(CCmode,zip_CC), const0_rtx));
+ if (dbg) zip_debug_rtx_pfx("OP0 ", *op0);
+ if (dbg) zip_debug_rtx_pfx("OP1 ", *op1);
+
+ // Z -> Z
+ // NZ -> !Z
+ // LT -> N
+ // GE -> !N
+ // LTU -> C
+ // GEU -> !C
+ //
+ // LTE -> GTE w/ swapped operands
+ // GT -> LT w/ swapped operands
+ // GTU -> LTU w/ swapped operands
+ // LEU -> GEU w/ swapped operands
+ //
+
+ if ((CONST_INT_P(*op0))||(GET_CODE(*op0) == PLUS)) {
+ rtx tmp = *op0;
+ *op0 = *op1;
+ *op1 = tmp;
+ *code = (int)swap_condition((enum rtx_code)*code);
+ }
+
+ if (*code == GTU) {
+ if (REG_P(*op1)) {
+ //; Reverse the comparison
+ reverse = true;
+ } else if (CONST_INT_P(*op1)) {
+ //; A > B
+ //; A >= B+1
+ //; Add one to the integer constant,
+ //; And use a GEU comparison
+ *code = GEU;
+ *op1 = GEN_INT(INTVAL(*op1)+1);
+ } else {
+ //; Reverse the comparison
+ reverse = true;
+ }
+ } else if (*code == LEU) {
+ if (REG_P(*op1)) {
+ reverse = true;
+ } else if (CONST_INT_P(*op1)) {
+ //; A <= B
+ //; A < B+1
+ //; Add one to the integer constant,
+				//; And use an LTU comparison
+ *op1 = GEN_INT(INTVAL(*op1)+1);
+ *code = LTU;
+ } else {
+ reverse = true;
+ }
+ } else if (*code == LE) {
+ if (REG_P(*op1)) {
+ reverse = true;
+ } else if (CONST_INT_P(*op1)) {
+				//; A <= B
+				//; A < B+1
+				//; Add one to the integer constant,
+				//; And use an LT comparison
+				*op1 = GEN_INT(INTVAL(*op1)+1);
+ *code = LT;
+ } else {
+ reverse = true;
+ }
+ } else if (*code == GT) {
+ if (REG_P(*op1)) {
+ //; Reverse the comparison
+ reverse = true;
+ } else if (CONST_INT_P(*op1)) {
+ //; A > B
+ //; A >= B+1
+ //; Add one to the integer constant,
+				//; And use a GE comparison
+ *op1 = GEN_INT(INTVAL(*op1)+1);
+ *code = GE;
+ } else {
+ reverse = true;
+ }
+ }
+
+ if (reverse) {
+ rtx tem = *op0;
+ *op0 = *op1;
+ *op1 = tem;
+ *code = (int)swap_condition((enum rtx_code)*code);
+ }
+}
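+
+/* Worked examples (illustrative, not from the original sources):
+ *	(GTU x 5)  with a constant operand becomes (GEU x 6), since
+ *	           x > 5 is the same as x >= 6 for integers;
+ *	(GTU x y)  with a register operand is swapped to (LTU y x);
+ *	(LE  x 7)  becomes (LT x 8), since x <= 7 is the same as x < 8.
+ * Only the Z/!Z, N/!N and C/!C style conditions in the table above survive
+ * canonicalization.
+ */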
+
+static bool
+zip_fixed_condition_code_regs(unsigned int *a, unsigned int *b) {
+ *a = zip_CC;
+ *b = INVALID_REGNUM;
+ return true;
+}
+
+
+/* totally buggy - we can't return pointers to nested functions */
+static void
+zip_asm_trampoline_template(FILE *f)
+{
+ fprintf(f, "\tbrev\t0,r1\n");
+ fprintf(f, "\tldilo\t0,r1\n");
+ fprintf(f, "\tjmp r1\n");
+}
+
+/* Worker function for TARGET_TRAMPOLINE_INIT. */
+static void
+zip_trampoline_init(rtx m_tramp ATTRIBUTE_UNUSED,
+ tree fndecl ATTRIBUTE_UNUSED,
+ rtx chain_value ATTRIBUTE_UNUSED) {
+// #warning "This needs to be filled out"
+ abort();
+}
+
+static tree
+def_builtin(const char *name, enum insn_code icode, enum ZIP_BUILTIN_ID_CODE code,
+ tree type)
+{
+ tree t = add_builtin_function(name,type,code, BUILT_IN_MD, NULL, NULL_TREE);
+
+ if(t) {
+ zip_builtins[code] = t;
+ zip_builtins_icode[code] = icode;
+ }
+
+ return t;
+
+}
+
+void zip_init_builtins(void) {
+
+ tree void_ftype_void = build_function_type_list(void_type_node, NULL_TREE);
+#ifdef HAVE_zip_rtu
+ def_builtin("zip_rtu", CODE_FOR_zip_rtu, ZIP_BUILTIN_RTU, void_ftype_void);
+#endif
+#ifdef HAVE_zip_halt
+ def_builtin("zip_halt", CODE_FOR_zip_halt, ZIP_BUILTIN_HALT, void_ftype_void);
+#endif
+#ifdef HAVE_zip_busy
+ def_builtin("zip_busy", CODE_FOR_zip_busy, ZIP_BUILTIN_BUSY, void_ftype_void);
+#endif
+#ifdef HAVE_zip_idle
+ def_builtin("zip_idle", CODE_FOR_zip_idle, ZIP_BUILTIN_IDLE, void_ftype_void);
+#endif
+
+#ifdef HAVE_zip_syscall
+// Support int SYSCALL(callID, int a, int b, int c);
+ def_builtin("zip_syscall", CODE_FOR_zip_syscall, ZIP_BUILTIN_SYSCALL,
+ build_function_type_list(void_type_node, NULL_TREE));
+#endif
+
+#ifdef HAVE_zip_save_context
+ def_builtin("zip_save_context", CODE_FOR_zip_save_context, ZIP_BUILTIN_SAVE_CONTEXT,
+ build_function_type_list(void_type_node, ptr_type_node, 0));
+#endif
+
+#ifdef HAVE_zip_restore_context
+ def_builtin("zip_restore_context", CODE_FOR_zip_restore_context, ZIP_BUILTIN_RESTORE_CONTEXT,
+ build_function_type_list(void_type_node, ptr_type_node, 0));
+#endif
+
+#ifdef HAVE_zip_bitrev
+ def_builtin("zip_bitrev", CODE_FOR_zip_bitrev, ZIP_BUILTIN_BITREV,
+ build_function_type_list(unsigned_type_node, unsigned_type_node,
+ NULL_TREE));
+#endif
+
+#ifdef HAVE_zip_cc
+ def_builtin("zip_cc", CODE_FOR_zip_cc, ZIP_BUILTIN_CC,
+ build_function_type_list(unsigned_type_node, NULL_TREE));
+#endif
+
+#ifdef HAVE_zip_ucc
+ def_builtin("zip_ucc", CODE_FOR_zip_ucc, ZIP_BUILTIN_UCC,
+ build_function_type_list(unsigned_type_node, NULL_TREE));
+#endif
+
+}
+
+static tree
+zip_builtin_decl(unsigned zip_builtin_code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (zip_builtin_code >= ZIP_BUILTIN_MAX)
+ return error_mark_node;
+
+ return zip_builtins[zip_builtin_code];
+}
+
+static rtx
+zip_expand_builtin(tree exp, rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ machine_mode tmode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND(CALL_EXPR_FN(exp), 0);
+ bool nonvoid = (TREE_TYPE(TREE_TYPE(fndecl)) != void_type_node);
+ enum ZIP_BUILTIN_ID_CODE code=(enum ZIP_BUILTIN_ID_CODE)DECL_FUNCTION_CODE(fndecl);
+ enum insn_code icode = zip_builtins_icode[code];
+ rtx pat, op[5];
+ call_expr_arg_iterator iter;
+ tree arg;
+
+ if ((code == ZIP_BUILTIN_SAVE_CONTEXT)
+ ||(code == ZIP_BUILTIN_RESTORE_CONTEXT)) {
+ arg = first_call_expr_arg(exp, &iter);
+ if (arg == error_mark_node)
+ return NULL_RTX;
+ op[0] = expand_normal(arg);
+ if (GET_CODE(op[0]) != REG)
+ op[0] = force_reg(Pmode, op[0]);
+ pat = GEN_FCN(icode)(op[0]);
+ } else if (code == ZIP_BUILTIN_BITREV) {
+ arg = first_call_expr_arg(exp, &iter);
+ if (arg == error_mark_node) {
+ return NULL_RTX;
+ }
+ op[0] = expand_normal(arg);
+ if (!target)
+ target = gen_reg_rtx(SImode);
+ pat = GEN_FCN(icode)(target, op[0]);
+ } else if ((code == ZIP_BUILTIN_CC)||(code == ZIP_BUILTIN_UCC)) {
+ if (!target)
+ target = gen_reg_rtx(SImode);
+ pat = GEN_FCN(icode)(target);
+ } else // RTU, HALT, IDLE
+ pat = GEN_FCN(icode)();
+ if (!pat)
+ return NULL_RTX;
+ emit_insn(pat);
+ return (nonvoid ? target : const0_rtx);
+}
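+
+/* Usage sketch (hypothetical, assuming the builtins are visible to C code
+ * under the names registered in zip_init_builtins() above):
+ *
+ *	unsigned r = zip_bitrev(x);	// expands via CODE_FOR_zip_bitrev
+ *	zip_halt();			// expands via CODE_FOR_zip_halt
+ *
+ * Void builtins return const0_rtx from the expander above; value-returning
+ * ones return the target register allocated there.
+ */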
+
+static bool
+zip_scalar_mode_supported_p(enum machine_mode mode)
+{
+ if ((ZIP_HAS_DI)&&(mode == DImode))
+ return true;
+ if ((mode==SImode)||(mode==HImode)||(mode==QImode))
+ return true;
+ if (mode==SFmode) // &&(ZIP_FPU)
+		return true;	// If (!ZIP_FPU), this will need to be emulated
+ if (mode==DFmode) // Must always be emulated
+ return true;
+ return false;
+}
+
+static bool
+zip_libgcc_floating_mode_supported_p(enum machine_mode mode)
+{
+ return ((mode)==SFmode)||((mode)==DFmode);
+}
+
+static int
+zip_address_cost(rtx addr ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED, bool spd ATTRIBUTE_UNUSED) {
+ return 1;
+}
+
+static bool
+zip_mode_dependent_address_p(const_rtx addr ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED) {
+ return false;
+}
+
+static void
+zip_debug_print(const char *pfx, int lvl, const char *str) {
+ int i;
+ i = lvl;
+ if ((true)||(lvl == 0))
+ fprintf(stderr, "%s", pfx);
+ else
+ i += strlen(pfx);
+ while(i-->0)
+ fprintf(stderr, " ");
+ fprintf(stderr, "%s\n", str);
+}
+
+static void
+zip_debug_print_m(const char *pfx, int lvl, const char *str, enum machine_mode m) {
+ int i;
+
+ i = lvl;
+ if ((true)||(lvl == 0))
+ fprintf(stderr, "%s", pfx);
+ else
+ i = lvl+strlen(pfx);
+ while(i-->0)
+ fprintf(stderr, " ");
+ switch(m) {
+ case VOIDmode:
+ fprintf(stderr, "%s:V\n", str);
+ break;
+ case BLKmode:
+ fprintf(stderr, "%s:BLK\n", str);
+ break;
+ case BImode:
+ fprintf(stderr, "%s:BI\n", str);
+ break;
+ case QImode:
+ fprintf(stderr, "%s:QI\n", str);
+ break;
+ case HImode:
+ fprintf(stderr, "%s:HI\n", str);
+ break;
+#ifdef HAVE_SImode
+ case SImode:
+ fprintf(stderr, "%s:SI\n", str);
+ break;
+#endif
+#ifdef HAVE_DImode
+ case DImode:
+ fprintf(stderr, "%s:DI\n", str);
+ break;
+#endif
+ case CCmode:
+ fprintf(stderr, "%s:CC\n", str);
+ break;
+ default:
+ fprintf(stderr, "%s:?\n", str);
+ }
+}
+
+static void
+zip_debug_rtx_1(const char *pfx, const_rtx x, int lvl) {
+ if (x == NULL_RTX) {
+ zip_debug_print(pfx, lvl, "(NULL-RTX)");
+ return;
+ } else if (GET_CODE(x) > NUM_RTX_CODE) {
+ char buf[64];
+ sprintf(buf, "(BAD-RTX-CODE %d)", GET_CODE(x));
+ zip_debug_print(pfx, lvl, buf);
+ gcc_assert(0 && "Bad RTX Code");
+ return;
+ } switch(GET_CODE(x)) { // rtl.def
+ case PARALLEL:
+ zip_debug_print(pfx, lvl, "(PARALLEL");
+ if (XVEC(x,0) != NULL)
+ for(int j=0; j<XVECLEN(x,0);j++)
+ zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ debug_rtx(x);
+ break;
+ case INT_LIST: zip_debug_print(pfx, lvl, "(INT-LIST"); break;
+ case SEQUENCE:
+ zip_debug_print(pfx, lvl, "(SEQUENCE");
+ for(int j=0; j<XVECLEN(x,0);j++)
+ zip_debug_rtx_1(pfx, XVECEXP(x,0,j), lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ debug_rtx(x);
+ break;
+ case ADDRESS: zip_debug_print(pfx, lvl, "(ADDRESS"); break;
+ case DEBUG_INSN: zip_debug_print(pfx, lvl, "(DEBUG-INSN"); break;
+ case INSN:
+ zip_debug_print(pfx, lvl, "(INSN");
+ /*
+ { const rtx_insn *tmp_rtx;
+ for(tmp_rtx = as_a <const rtx_insn *>(x); tmp_rtx != 0; tmp_rtx = NEXT_INSN(tmp_rtx)) {
+ zip_debug_rtx_1(tmp_rtx, lvl+1);
+ }}
+ */
+ zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ debug_rtx(x);
+ break;
+ case JUMP_INSN: zip_debug_print(pfx, lvl, "(JUMP-INSN");
+ zip_debug_rtx_1(pfx, PATTERN(x), lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ /*
+ if (JUMP_LABEL(x)) {
+ if (GET_CODE(JUMP_LABEL(x)) == LABEL_REF) {
+ char buf[64];
+ sprintf(buf, "(LABEL *.L%d))", CODE_LABEL_NUMBER(LABEL_REF_LABEL(JUMP_LABEL(x))));
+ zip_debug_print(pfx, lvl+1, buf);
+ } else if (GET_CODE(JUMP_LABEL(x))==CODE_LABEL) {
+ char buf[64];
+ sprintf(buf, "(CODE_LABEL *.L%d))", CODE_LABEL_NUMBER(JUMP_LABEL(x)));
+ zip_debug_print(pfx, lvl+1, buf);
+ } else
+ zip_debug_print(pfx, lvl+1, "(w/Label))");
+ } else
+ zip_debug_print(pfx, lvl+1, "(NO label))");
+ debug_rtx(x);
+ */
+ break;
+ case CALL:
+ zip_debug_print(pfx, lvl, "(CALL (Adr) (Args)");
+ zip_debug_rtx_1(pfx, XEXP(x,0), lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1), lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case CALL_INSN: zip_debug_print(pfx, lvl, "(CALL-INSN");
+ debug_rtx(x);
+ break;
+ case BARRIER: zip_debug_print(pfx, lvl, "(BARRIER)"); break;
+ case RETURN: zip_debug_print(pfx, lvl, "(RETURN)"); break;
+ case NOTE:
+ { char buf[128];
+ sprintf(buf, "(NOTE %s)", GET_REG_NOTE_NAME(GET_MODE(x)));
+ zip_debug_print(pfx, lvl, buf);
+ }break;
+ case COND_EXEC: zip_debug_print(pfx, lvl, "(COND_EXEC)");
+ debug_rtx(x);
+ break;
+ case ASM_INPUT: zip_debug_print(pfx, lvl, "(ASM INPUT)"); break;
+ case ASM_OPERANDS: zip_debug_print(pfx, lvl, "(ASM OPERANDS)"); break;
+ case UNSPEC: zip_debug_print(pfx, lvl, "(UNSPEC)"); break;
+ case UNSPEC_VOLATILE: zip_debug_print(pfx, lvl, "(UNSPEC_VOLATILE)"); break;
+ case CODE_LABEL:
+ {
+ char buf[128];
+ sprintf(buf, "(CODE_LABEL *.L%d)", CODE_LABEL_NUMBER(x));
+ zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+ } break;
+ case SET:
+ zip_debug_print_m(pfx, lvl, "(SET", GET_MODE(x));
+ zip_debug_rtx_1(pfx, SET_DEST(x),lvl+1);
+ zip_debug_rtx_1(pfx, SET_SRC(x),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ debug_rtx(x);
+ break;
+ case REG: {
+ char buf[25], mstr[4];
+ mstr[0] = '\0';
+ if (GET_MODE(x) == QImode)
+ strcpy(mstr, ":QI");
+ else if (GET_MODE(x) == HImode)
+ strcpy(mstr, ":HI");
+ else if (GET_MODE(x) == VOIDmode)
+ strcpy(mstr, ":V");
+ if (REGNO(x) == zip_PC)
+ sprintf(buf, "(PC%s)", mstr);
+ else if (REGNO(x) == zip_CC)
+ sprintf(buf, "(CC%s)", mstr);
+ else if (REGNO(x) == zip_SP)
+ sprintf(buf, "(SP%s)", mstr);
+ else if (REGNO(x) == zip_FP)
+ sprintf(buf, "(REG%s FP)", mstr);
+ else if (REGNO(x) == zip_GOT)
+ sprintf(buf, "(REG%s GBL)", mstr);
+ else if (FUNCTION_VALUE_REGNO_P(REGNO(x)))
+ sprintf(buf, "(REG%s RTN-VL)", mstr);
+ else if (REGNO(x) == RETURN_ADDRESS_REGNUM)
+ sprintf(buf, "(REG%s RTN-AD)", mstr);
+ else
+ sprintf(buf, "(REG%s %d)", mstr, REGNO(x));
+ if (mstr[0])
+ zip_debug_print(pfx, lvl, buf);
+ else
+ zip_debug_print_m(pfx, lvl, buf, GET_MODE(x));
+ } break;
+ case IF_THEN_ELSE: // 51
+ zip_debug_print(pfx, lvl, "(IF-THEN-ELSE");
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,2),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case PC:
+ zip_debug_print(pfx, lvl, "(PC)");
+ break;
+ case CC0:
+ zip_debug_print(pfx, lvl, "(CC0)");
+ break;
+ case COMPARE:
+ zip_debug_print_m(pfx, lvl, "(COMPARE", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case CONST:
+ zip_debug_print_m(pfx, lvl, "(CONST", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case CONST_INT:
+ { char buf[128];
+ if (GET_MODE(x)==QImode)
+ sprintf(buf, "(CONST_INT:QI %ld)", (long)INTVAL(x));
+ else if (GET_MODE(x)==VOIDmode)
+ sprintf(buf, "(CONST_INT:V %ld, %016lx)", (long)INTVAL(x),
+ (unsigned long)INTVAL(x));
+ else
+ sprintf(buf, "(CONST_INT:? %ld)", (long)INTVAL(x));
+ zip_debug_print(pfx, lvl, buf);
+ } break;
+ case LABEL_REF:
+ { char buf[256];
+ sprintf(buf, "(LABEL *.L%d)", CODE_LABEL_NUMBER(LABEL_REF_LABEL(x)));
+ zip_debug_print(pfx, lvl, buf);
+ }
+ break;
+ case SYMBOL_REF:
+ {
+ char buf[1024];
+ sprintf(buf, "(SYMBOL: %s)", XSTR(x,0));
+ // fprintf(file, "%s", XSTR(x,0));
+ zip_debug_print(pfx, lvl, buf);
+ }
+ break;
+ case MEM:
+ zip_debug_print_m(pfx, lvl, "(MEM", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ /*
+ case VALUE:
+ {
+ char buf[64];
+ sprintf(buf, "(VALUE: %d)", INTVAL(XEXP,0));
+ zip_debug_print_m(pfx, lvl, "buf", GET_MODE(x));
+ }
+ break;
+ */
+ case PLUS:
+ zip_debug_print_m(pfx, lvl, "(PLUS", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case MINUS:
+ zip_debug_print_m(pfx, lvl, "(MINUS", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case AND:
+ zip_debug_print_m(pfx, lvl, "(AND", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case IOR:
+ zip_debug_print_m(pfx, lvl, "(OR", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case XOR:
+ zip_debug_print_m(pfx, lvl, "(XOR", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case MULT:
+ zip_debug_print_m(pfx, lvl, "(MULT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case EQ: //
+ zip_debug_print_m(pfx, lvl, "(EQ", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case NE: //
+ zip_debug_print_m(pfx, lvl, "(NE", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case GE: //
+ zip_debug_print_m(pfx, lvl, "(GE", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case GT: //
+ zip_debug_print_m(pfx, lvl, "(GT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case LE: //
+ zip_debug_print_m(pfx, lvl, "(LE", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case LT: //
+ zip_debug_print_m(pfx, lvl, "(LT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case GEU: //
+ zip_debug_print_m(pfx, lvl, "(GEU", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case GTU: //
+ zip_debug_print_m(pfx, lvl, "(GTU", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case LEU: //
+ zip_debug_print_m(pfx, lvl, "(LEU", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case LTU: //
+ zip_debug_print_m(pfx, lvl, "(LTU", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case SCRATCH: //
+ zip_debug_print_m(pfx, lvl, "(SCRATCH)", GET_MODE(x));
+ break;
+ case SUBREG:
+ { char buf[64], mstr[8];
+ if (GET_MODE(x) == QImode)
+ strcpy(mstr, ":QI");
+ else if (GET_MODE(x) == HImode)
+ strcpy(mstr, ":HI");
+ else if (GET_MODE(x) == SImode)
+ strcpy(mstr, ":SI");
+ else if (GET_MODE(x) == VOIDmode)
+ strcpy(mstr, ":V");
+ else
+ strcpy(mstr, ":?");
+ if (REG_P(XEXP(x,0))) {
+ int hreg = REGNO(XEXP(x,0)), mod = GET_MODE(XEXP(x,0)),
+ sb = SUBREG_BYTE(x);
+ if (mod==QImode)
+ sprintf(buf,"(SUBREG%s (REG:QI %d)/%d)",mstr,hreg, sb);
+ else if (mod==HImode)
+ sprintf(buf,"(SUBREG%s (REG:HI %d)/%d)",mstr,hreg, sb);
+			else if (mod==SImode)
+				sprintf(buf,"(SUBREG%s (REG:SI %d)/%d)",mstr,hreg, sb);
+ else if (mod==VOIDmode)
+ sprintf(buf,"(SUBREG%s (REG:V %d)/%d)",mstr,hreg, sb);
+ else
+ sprintf(buf,"(SUBREG%s %d:?/%d)",mstr,hreg, sb);
+ zip_debug_print(pfx, lvl, buf);
+ } else if (MEM_P(XEXP(x,0))) {
+ sprintf(buf, "(SUBREG%s /%d", mstr,SUBREG_BYTE(x));
+ zip_debug_print(pfx, lvl, buf);
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ } else {
+ sprintf(buf, "(SUBREG%s UNK /%d", mstr,SUBREG_BYTE(x));
+ zip_debug_print(pfx, lvl, buf);
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ }}
+ break;
+ case ASHIFT:
+ zip_debug_print_m(pfx, lvl, "(ASHIFT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case ASHIFTRT:
+ zip_debug_print_m(pfx, lvl, "(ASHIFTRT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case LSHIFTRT:
+ zip_debug_print_m(pfx, lvl, "(LSHIFTRT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_rtx_1(pfx, XEXP(x,1),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case ZERO_EXTRACT:
+ zip_debug_print_m(pfx, lvl, "(ZERO_EXTRACT", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ case ZERO_EXTEND:
+ zip_debug_print_m(pfx, lvl, "(ZERO_EXTEND", GET_MODE(x));
+ zip_debug_rtx_1(pfx, XEXP(x,0),lvl+1);
+ zip_debug_print(pfx, lvl, ")");
+ break;
+ default:
+ { char buf[128];
+ sprintf(buf, "(? = %d) -- calling DEBUG-RTX", GET_CODE(x));
+ zip_debug_print(pfx, lvl, buf);
+ debug_rtx(x);
+ } break;
+ }
+}
+
+void
+zip_debug_rtx_pfx(const char *pfx, const_rtx x) {
+ zip_debug_rtx_1(pfx, x, 0);
+}
+
+void
+zip_debug_rtx(const_rtx x) {
+ zip_debug_rtx_pfx("", x);
+}
+
+void
+zip_debug_ccode(int ccode) {
+ switch(ccode) {
+ case EQ: fprintf(stderr, "EQ"); break;
+ case NE: fprintf(stderr, "NE"); break;
+ case GE: fprintf(stderr, "GE"); break;
+ case LT: fprintf(stderr, "LT"); break;
+ case LTU: fprintf(stderr, "LTU"); break;
+ case GEU: fprintf(stderr, "GEU"); break;
+ case GT: fprintf(stderr, "GT[!]"); break;
+ case LE: fprintf(stderr, "LE[!]"); break;
+ case GTU: fprintf(stderr, "GTU[!]"); break;
+ case LEU: fprintf(stderr, "LEU[!]"); break;
+ default:
+ fprintf(stderr, "%d", ccode); break;
+ }
+}
+
+void
+zip_debug_insn(rtx_insn *insn ATTRIBUTE_UNUSED) {
+}
+
+void
+zip_debug_bb(basic_block bb) {
+ rtx_insn *insn;
+
+ fprintf(stderr, "************ BASIC-BLOCK ***************\n");
+ FOR_BB_INSNS(bb, insn)
+ {
+ zip_debug_rtx(insn);
+ }
+}
+
+
+static bool
+zip_legitimate_opb(rtx x, bool strict)
+{
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB\n");
+ if (dbg) zip_debug_rtx_pfx("Test: ", x);
+
+ if (NULL_RTX == x)
+ return false;
+ else if ((GET_MODE(x) != QImode)
+ &&(GET_MODE(x) != HImode)
+ &&(GET_MODE(x) != SImode)
+ &&(GET_MODE(x) != VOIDmode)) {
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> Mode failure\n");
+ return false;
+ } else if ((strict)&&(REG_P(x))) {
+ if (REGNO(x)<zip_CC) {
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+ return true;
+ } else return false;
+ } else if (register_operand(x, GET_MODE(x))) {
+ // This also handles subregs
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> (Reg)\n");
+ return true;
+ } else if ((CONST_INT_P(x))
+ &&(INTVAL(x) >= zip_min_opb_imm)
+ &&(INTVAL(x) <= zip_max_opb_imm)) {
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (Const) %ld <= %ld <= %ld\n", (long)zip_min_opb_imm, (long)INTVAL(x), (long)zip_max_opb_imm);
+ return true;
+ // } else if ((GET_CODE(x) == LABEL_REF)||(GET_CODE(x)==CODE_LABEL)) {
+ // return true;
+ } else if (GET_CODE(x) == PLUS) {
+ // Is it a valid register?
+ rtx regrtx = XEXP(x, 0);
+ if ((!strict)&&(!REG_P(regrtx))) {
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No (No reg in +%s)\n",
+ (GET_CODE(XEXP(x,1))==REG)?", reg in op[1]":"");
+ return false;
+ } else if ((strict)&&((!REG_P(XEXP(x,0)))||(REGNO(XEXP(x,0))>=zip_CC))) {
+ return false;
+ } if ((GET_CODE(XEXP(x, 1)) == CONST_INT)
+ &&(INTVAL(XEXP(x, 1)) <= zip_max_anchor_offset)
+ &&(INTVAL(XEXP(x, 1)) >= zip_min_anchor_offset)) {
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> YES! (reg+int)\n");
+ // if((INTVAL(XEXP(x,1))<0)&&(REGNO(XEXP(x,0))==zip_SP))
+ // gcc_unreachable();
+ return true;
+ } if ((GET_CODE(XEXP(x, 1)) == LABEL_REF)
+ ||(GET_CODE(XEXP(x, 1)) == CODE_LABEL)
+ ||(GET_CODE(XEXP(x, 1)) == SYMBOL_REF)) {
+ // While we can technically support this, the problem
+ // is that the symbol address could be anywhere, and we
+ // have no way of recovering if it's outside of our
+ // 14 allowable bits.
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No. (reg+lbl)\n");
+ return false;
+ }
+ }
+
+ if (dbg) fprintf(stderr, "ZIP-LEGITIMATE-OPB -> No\n");
+ if (dbg) zip_debug_rtx(x);
+ return false;
+}
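+
+/* Examples (illustrative): operands accepted above include a plain register
+ * such as (reg:SI 2) and a register plus a small constant such as
+ * (plus:SI (reg:SI 2) (const_int 8)), provided the constant lies within
+ * [zip_min_anchor_offset, zip_max_anchor_offset].  A register plus a
+ * SYMBOL_REF is rejected, since the symbol's final address may not fit the
+ * limited immediate field noted above.
+ */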
+
+static bool
+zip_legitimate_move_operand_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict) {
+ const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+ if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND\n");
+ if (dbg) zip_debug_rtx_pfx("VMov?: ", x);
+
+ if (!zip_legitimate_opb(x, strict))
+ return false;
+ else if ((GET_CODE(x)==PLUS)&&(CONST_INT_P(XEXP(x,1)))) {
+ if ((INTVAL(XEXP(x, 1)) > zip_max_mov_offset)
+ ||(INTVAL(XEXP(x, 1)) < zip_min_mov_offset)) {
+ if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> NO! (reg+int), int out of bounds: %ld\n", (long)INTVAL(XEXP(x,1)));
+ return false;
+ }
+ }
+
+ if (dbg) fprintf(stderr, "ZIP-VALID-MOVE-OPERAND -> Yes\n");
+ if (dbg) zip_debug_rtx(x);
+ return true;
+}
+
+int
+zip_pd_mov_operand(rtx op)
+{
+ const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+ if (dbg) fprintf(stderr, "ZIP-VALID-MOV(predicate) for OPERAND\n");
+ return zip_legitimate_move_operand_p(VOIDmode, op, !can_create_pseudo_p());
+}
+
+int
+zip_pd_mvimm_operand(rtx op)
+{
+ const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+ if (dbg) fprintf(stderr, "ZIP-VALID-MVIMM(predicate) for OPERAND\n");
+ if (!CONST_INT_P(op))
+ return false;
+ if (INTVAL(op) > zip_max_mov_offset)
+ return false;
+ if (INTVAL(op) < zip_min_mov_offset)
+ return false;
+ return true;
+}
+
+int
+zip_pd_imm_operand(rtx op)
+{
+ const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+ if (dbg) fprintf(stderr, "ZIP-VALID-IMM(predicate) for OPERAND\n");
+ if (!CONST_INT_P(op))
+ return false;
+ if (INTVAL(op) > zip_max_anchor_offset)
+ return false;
+ if (INTVAL(op) < zip_min_anchor_offset)
+ return false;
+ return true;
+}
+
+int
+zip_address_operand(rtx op)
+{
+ const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+
+ if (dbg) fprintf(stderr, "ZIP-ADDRESS for OPERAND\n");
+ if ((REG_P(op))&&(REGNO(op)==zip_CC))
+ return false;
+ else if ((GET_CODE(op) == PLUS)&&(REG_P(XEXP(op,0)))
+ &&(REGNO(XEXP(op,0))==zip_CC))
+ return false;
+ else
+ return zip_legitimate_opb(op, !can_create_pseudo_p());
+}
+
+int
+zip_pd_opb_operand(rtx op)
+{
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP-OPB(predicate) for OPERAND\n");
+ return zip_legitimate_opb(op, false); //, !can_create_pseudo_p());
+}
+
+int
+zip_ct_address_operand(rtx op)
+{
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP-ADDRESS(constraint) for OPERAND\n");
+ return zip_legitimate_opb(op, !can_create_pseudo_p());
+}
+
+int
+zip_const_address_operand(rtx x) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS?\n");
+ if (dbg) zip_debug_rtx(x);
+ if ((GET_MODE(x) != SImode)&&(GET_MODE(x) != VOIDmode)) {
+		if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> NO, BAD MODE\n");
+ return false;
+ }
+ if ((GET_CODE(x) == LABEL_REF)
+ ||(GET_CODE(x) == CODE_LABEL)
+ ||(GET_CODE(x) == SYMBOL_REF)) {
+ if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (LBL)\n");
+ return true;
+ } else if (CONST_INT_P(x)) {
+ if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> YES! (INT)\n");
+ return true;
+ } else if (GET_CODE(x) == PLUS) {
+ if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(PLUS)\n");
+ return ((zip_const_address_operand(XEXP(x,0)))
+ &&(CONST_INT_P(XEXP(x,1))));
+ } else if (GET_CODE(x) == MINUS) {
+ if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS(MINUS)\n");
+ return ((zip_const_address_operand(XEXP(x,0)))
+ &&(zip_const_address_operand(XEXP(x,1))));
+ }
+
+ if (dbg) fprintf(stderr, "is ZIP-CONST-ADDRESS? -> No\n");
+ if (dbg) zip_debug_rtx(x);
+ return false;
+}
+
+int
+zip_ct_const_address_operand(rtx x) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(constraint)\n");
+ return zip_const_address_operand(x);
+}
+
+int
+zip_pd_const_address_operand(rtx x) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP-CONST-ADDRESS(predicate)\n");
+ return zip_const_address_operand(x);
+}
+
+
+static bool
+zip_legitimate_address_p(machine_mode mode ATTRIBUTE_UNUSED, rtx x, bool strict)
+{
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "Zip-LEGITIMATE-ADDRESS-P\n");
+ if (dbg) zip_debug_rtx(x);
+
+ // Only insist the register be a valid register if strict is true
+ if (zip_legitimate_opb(x, strict))
+ return true;
+ // else if (zip_const_address_operand(x))
+ // return true;
+
+ return false;
+}
+
+static rtx
+zip_legitimize_address(rtx x, rtx oldx ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED) {
+ ZIPDEBUGFLAG(dbg, false);
+
+
+ if (dbg) zip_debug_rtx_pfx("LEGITIMIZE: ", x);
+ if (zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+ return x;
+
+ if (dbg) zip_debug_rtx_pfx("ILLEGITIMATE: ", x);
+ if (GET_CODE(x)==PLUS) {
+ // if ((zip_legitimate_address_p(mode, XEXP(x,0),
+ // !can_create_pseudo_p()))
+ // &&(GETMODE(XEXP(x,1))==CONST_INT)) {
+ //}
+ if (!REG_P(XEXP(x,0)))
+ XEXP(x,0) = force_reg(Pmode,XEXP(x,0));
+ if ((!zip_legitimate_address_p(mode, x, !can_create_pseudo_p()))
+ &&(!CONST_INT_P(XEXP(x,1))))
+ x = force_reg(GET_MODE(x),x);
+ } else if (MEM_P(x))
+ x = force_reg(GET_MODE(x),x);
+
+ if (dbg) zip_debug_rtx_pfx("LEGITIMATE: ", x);
+ return x;
+}
+
+void
+zip_asm_output_def(FILE *stream, const char *name, const char *value)
+{
+ fprintf(stream, "\t.equ %s, %s\n", name, value);
+}
+
+const char *zip_set_zero_or_one(rtx condition, rtx dst) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "ZIP::SET-ZERO-OR-ONE\n");
+ if (dbg) zip_debug_rtx_pfx("CND", condition);
+ if (dbg) zip_debug_rtx_pfx("REG", dst);
+ switch(GET_CODE(condition)) {
+ case EQ: return "LDI\t0,%0\n\tLDILO.Z\t1,%0\t; set01_eq";
+ case NE: return "LDI\t0,%0\n\tLDILO.NZ\t1,%0\t; set01_ne";
+ case LT: return "LDI\t0,%0\n\tLDILO.LT\t1,%0\t; set01_lt";
+ case GT: return "LDI\t1,%0\n\tLDILO.LT\t1,%0\n\tLDILO.Z\t1,%0\t; set01_gt";
+ case LE: return "LDI\t0,%0\n\tLDILO.LT\t1,%0\n\tLDILO.Z\t1,%0\t; set01_le";
+ case GE: return "LDI\t0,%0\n\tLDILO.GE\t1,%0\t; set01_ge";
+ case LTU: return "LDI\t0,%0\n\tLDILO.C\t1,%0\t; set01_ltu";
+ case GEU: return "LDI\t0,%0\n\tLDILO.NC\t1,%0\t; set01_geu";
+ case GTU: return "LDI\t1,%0\n\tLDILO.C\t0,%0\n\tLDILO.Z\t0,%0\t; set01_gtu";
+ case LEU: return "LDI\t0,%0\n\tLDILO.C\t1,%0\n\tLDILO.Z\t1,%0\t; set01_leu";
+ default:
+ zip_debug_rtx(condition);
+ internal_error("CSTORE Unsupported condition");
+ return NULL;
+ }
+}
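+
+/* Illustrative trace of the GTU template above: the destination is loaded
+ * with 1, LDILO.C then clears it when the carry flag indicates A < B, and
+ * LDILO.Z clears it when A == B, so a 1 survives only for A > B (unsigned).
+ * The remaining cases follow the same load-then-conditionally-overwrite
+ * pattern.
+ */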
+
+int
+zip_supported_condition(int c) {
+ switch(c) {
+ case EQ: case NE: case LT: case GE: case LTU: case GEU:
+ return 1;
+ break;
+ default:
+ break;
+ } return 0;
+}
+
+bool
+zip_signed_comparison(int c) {
+ switch(c) {
+ case NE: case LT: case EQ: case GE:
+ return true;
+ default:
+ break;
+ } return false;
+}
+
+int
+zip_expand_movdi(rtx dst, rtx src) {
+ ZIPDEBUGFLAG(dbg, false);
+
+ if (dbg) fprintf(stderr, "\nZIP::MOVDI\n");
+ if (dbg) zip_debug_rtx_pfx("DST", dst);
+ if (dbg) zip_debug_rtx_pfx("SRC", src);
+
+ // MOV !REG->!REG
+ if ((!REG_P(dst))&&(!REG_P(src))&&(can_create_pseudo_p())) {
+ // This includes:
+ // MOV MEM->MEM
+ // MOV IMM->MEM
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- !REG->!REG\n");
+
+ rtx tmp = gen_reg_rtx(DImode);
+ emit_insn(gen_movdi(tmp, src));
+ emit_insn(gen_movdi(dst, tmp));
+ return 1;
+ }
+
+ // MOV REG->REG
+ if ((REG_P(dst))&&(REG_P(src))) {
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->REG\n");
+
+ emit_insn(gen_movdi_raw(dst, src));
+ return 1;
+ }
+
+ // MOV REG->MEM (a store instruction)
+ if ((MEM_P(dst))&&(REG_P(src))) {
+ rtx addr = XEXP(dst,0);
+ long offset = 0;
+ if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+ offset = INTVAL(XEXP(addr,1));
+
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- REG->MEM\n");
+ if (REG_P(addr)) {
+ emit_insn(gen_movdi_raw(dst, src));
+ return 1;
+ } else if ((GET_CODE(addr)==PLUS)
+ &&(REG_P(XEXP(addr,0)))
+ &&(CONST_INT_P(XEXP(addr,1)))
+ &&(offset>=(long)zip_min_anchor_offset)
+ &&(offset+4<(long)zip_max_anchor_offset)) {
+ // Demonstrated and works
+ emit_insn(gen_movdi_raw(dst, src));
+ return 1;
+ } else if (can_create_pseudo_p()) {
+ rtx tmp = gen_reg_rtx(Pmode);
+ emit_insn(gen_movsi(tmp, addr));
+ emit_insn(gen_movdi_raw(gen_rtx_MEM(DImode, tmp), src));
+ return 1;
+ }
+ }
+
+ // MOV MEM->REG (a load instruction)
+ if ((REG_P(dst))&&(MEM_P(src))) {
+ rtx addr = XEXP(src,0);
+ long offset = 0;
+ if ((GET_CODE(addr)==PLUS)&&(CONST_INT_P(XEXP(addr,1))))
+ offset = INTVAL(XEXP(addr,1));
+
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM->REG\n");
+ if (REG_P(addr)) {
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[R]->REG\n");
+ emit_insn(gen_movdi_raw(dst, src));
+ return 1;
+ } else if ((GET_CODE(addr)==PLUS)
+ &&(REG_P(XEXP(addr,0)))
+ &&(CONST_INT_P(XEXP(addr,1)))
+ &&(offset>=(long)zip_min_anchor_offset)
+ &&(offset+4<(long)zip_max_anchor_offset)) {
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- MEM[#+R]->REG -- DONE\n");
+ emit_insn(gen_movdi_raw(dst, src));
+ return 1;
+ } else if (can_create_pseudo_p()) {
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- LDI #,R, MEM[R]->REG\n");
+ rtx tmp = gen_reg_rtx(Pmode);
+ emit_insn(gen_movsi(tmp, addr));
+ emit_insn(gen_movdi_raw(dst,
+ gen_rtx_MEM(DImode, tmp)));
+ return 1;
+ } else if (dbg)
+ fprintf(stderr, "ZIP::MOVDI -- MEM[?]->REG (no match)\n");
+ }
+
+ // MOV #->REG (An LDI instruction, but for DIwords)
+ if ((CONST_INT_P(src))&&(REG_P(dst))) {
+ if (dbg) fprintf(stderr, "ZIP::MOVDI -- IMM->REG\n");
+ emit_insn(gen_movdi_raw(dst, src));
+ return 1;
+ }
+
+ return 0;
+}
+
+const char *
+zip_addsicc(rtx dst, rtx condition, rtx ifsrc, rtx addv) {
+ // We know upon entry that REG_P(dst) must be true
+ if (!REG_P(dst))
+ internal_error("%s","ADDSICC into something other than register");
+
+ if ((REG_P(dst))&&(REG_P(ifsrc))&&(REG_P(addv))
+ &&(REGNO(dst)!=REGNO(ifsrc))) {
+ switch (GET_CODE(condition)) {
+ case EQ: return "MOV.Z\t%2,%0\n\tADD.Z\t%3,%0";
+ case NE: return "MOV.NZ\t%2,%0\n\tADD.NZ\t%3,%0";
+ case LT: return "MOV.LT\t%2,%0\n\tADD.LT\t%3,%0";
+
+		case LE: return "MOV.LT\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+ case GE: return "MOV.GE\t%2,%0\n\tADD.GE\t%3,%0";
+
+		case GT: return "BLT\t.Laddsi%=\n\tBZ\t.Laddsi%=\n\tMOV\t%2,%0\n\tADD\t%3,%0\n.Laddsi%=:";
+ case LTU: return "MOV.C\t%2,%0\n\tADD.C\t%3,%0";
+
+ case LEU: return "MOV.C\t%2,%0\n\tMOV.Z\t%2,%0\n\tADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+ case GEU: return "MOV.NC\t%2,%0\n\tADD.NC\t%3,%0";
+		case GTU: return "BZ\t.Laddsi%=\n\tMOV.NC\t%2,%0\n\tADD.NC\t%3,%0\n.Laddsi%=:";
+ default:
+ internal_error("%s", "Zip/No usable addsi expansion");
+ break;
+ }
+ }
+
+ if ((REG_P(ifsrc))&&(REGNO(dst)==REGNO(ifsrc))) {
+ switch (GET_CODE(condition)) {
+ case EQ: return "ADD.Z\t%3,%0";
+ case NE: return "ADD.NZ\t%3,%0";
+ case LT: return "ADD.LT\t%3,%0";
+ case LE: return "ADD.LT\t%3,%0\n\tADD.Z\t%3,%0";
+ case GE: return "ADD.GE\t%3,%0";
+ case GT: return "ADD.GE\t%3,%0\n\tSUB.Z\t%3,%0";
+ case LTU: return "ADD.C\t%3,%0";
+ case LEU: return "ADD.C\t%3,%0\n\tADD.Z\t%3,%0";
+ case GEU: return "ADD.NC\t%3,%0";
+ case GTU: return "SUB.Z\t%3,%0\n\tADD.NC\t%3,%0";
+ default:
+ internal_error("%s", "Zip/No usable addsi expansion");
+ break;
+ }
+ } else {
+ // MOV A+REG,REG
+ switch (GET_CODE(condition)) {
+ case EQ: return "MOV.Z\t%3+%2,%0";
+ case NE: return "MOV.NZ\t%3+%2,%0";
+ case LT: return "MOV.LT\t%3+%2,%0";
+		case GT: return "BLT\t.Laddcc%=\n\tBZ\t.Laddcc%=\n\tMOV\t%3+%2,%0\n.Laddcc%=:";
+ case LE: return "MOV.LT\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+ case GE: return "MOV.GE\t%3+%2,%0";
+ case LTU: return "MOV.C\t%3+%2,%0";
+ case LEU: return "MOV.C\t%3+%2,%0\n\tMOV.Z\t%3+%2,%0";
+ case GEU: return "MOV.NC\t%3+%2,%0";
+		case GTU: return "BZ\t.Laddcc%=\n\tMOV.NC\t%3+%2,%0\n.Laddcc%=:";
+ default:
+ internal_error("%s", "Zip/No usable addsi(reg,reg) expansion");
+ break;
+ }
+ }
+
+ return "BREAK";
+}
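+
+/* Illustrative reading of the templates above (not part of the original
+ * sources): with the destination equal to the if-source, the EQ case is the
+ * single predicated "ADD.Z %3,%0", i.e.
+ *
+ *	if (a == b) dst += addv;
+ *
+ * while the three-operand forms first conditionally MOV the if-source into
+ * the destination and then conditionally ADD the addend.
+ */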
+
+static int zip_memory_move_cost(machine_mode mode, reg_class_t ATTRIBUTE_UNUSED, bool in ATTRIBUTE_UNUSED) {
+ int rv = 14;
+ if ((mode == DImode)||(mode == DFmode))
+ rv += 2;
+ return rv;
+}
+
+// #warning "How do we tell the compiler LDI label is expensive as 2 ops"?
+static bool zip_cannot_modify_jumps_p(void) {
+ // Let's try their suggested approach, keeping us from modifying jumps
+ // after reload. This should also allow our peephole2 optimizations
+ // to adjust things back to what they need to be if necessary.
+ return (reload_completed || reload_in_progress);
+}
+
+rtx_insn *zip_ifcvt_info;
+
+void
+zip_ifcvt_modify_tests(ce_if_block *ce_info ATTRIBUTE_UNUSED, rtx *true_expr, rtx *false_expr) {
+ const bool dbg = ((ZIP_ALL_DEBUG_ON)||(false))&&(!ZIP_ALL_DEBUG_OFF);
+ if (dbg) fprintf(stderr, "IFCVT-MODIFY-TESTS\n");
+ if (*true_expr) switch(GET_CODE(*true_expr)) {
+ // These are our unsupported conditions
+ case LE:
+ case GT:
+ case LEU:
+ case GTU:
+ if (dbg) fprintf(stderr, "TRUE, missing expr\n");
+ if (dbg) zip_debug_rtx(*true_expr);
+ *true_expr = NULL_RTX;
+ break;
+	default: // LT, GE, LTU, GEU, NE, EQ
+ break;
+ }
+
+ if (*false_expr) switch(GET_CODE(*false_expr)) {
+ case LE:
+ case GT:
+ case LEU:
+ case GTU:
+ if (dbg) fprintf(stderr, "FALSE, missing expr\n");
+ if (dbg) zip_debug_rtx(*false_expr);
+ *false_expr = NULL_RTX;
+ default:
+ break;
+ }
+ if ((dbg)&&((!*true_expr)||(!*false_expr)))
+ fprintf(stderr, "IFCVT-MODIFY-TESTS -- FAIL\n");
+}
+
+void
+zip_ifcvt_machdep_init(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+if (!ceinfo->then_bb)
+ return;
+rtx_insn *insn;
+FOR_BB_INSNS(ceinfo->then_bb, insn) {
+ fprintf(stderr, "IFCVT -- INIT\n");
+ zip_debug_rtx_pfx("INIT-BB", insn);
+}
+*/
+/*
+ zip_ifcvt_info = NULL;
+ rtx_insn *insn, *ifinsn = NULL;
+ FOR_BB_INSNS(ceinfo->test_bb, insn) {
+ rtx p;
+ p = single_set(insn);
+ if (!p) continue;
+ if (SET_DEST(p)==pc_rtx) {
+ ifinsn = insn;
+ }
+ if (!REG_P(SET_DEST(p)))
+ continue;
+ if (GET_MODE(SET_DEST(p))!=CCmode)
+ continue;
+ if (REGNO(SET_DEST(p))!=zip_CC)
+ continue;
+ zip_ifcvt_info = insn;
+ }
+
+ if (zip_ifcvt_info)
+ zip_debug_rtx_pfx("PUTATIVE-CMP",zip_ifcvt_info);
+ if (ifinsn)
+ zip_debug_rtx_pfx("PRIOR-JMP",ifinsn);
+*/
+}
+
+void
+zip_ifcvt_modify_insn(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED,
+ rtx pattern ATTRIBUTE_UNUSED,
+ rtx_insn *insn ATTRIBUTE_UNUSED) {
+ // zip_debug_rtx_pfx("MODIFY-INSN: ", insn);
+}
+
+void
+zip_ifcvt_modify_cancel(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+ fprintf(stderr, "IFCVT -- CANCEL\n");
+ zip_ifcvt_info = NULL;
+*/
+}
+
+void
+zip_ifcvt_modify_final(struct ce_if_block *ceinfo ATTRIBUTE_UNUSED) {
+/*
+rtx_insn *insn;
+FOR_BB_INSNS(ceinfo->test_bb, insn) {
+ fprintf(stderr, "IFCVT -- FINAL\n");
+ zip_debug_rtx_pfx("FINAL-TEST-BB", insn);
+}
+ zip_ifcvt_info = NULL;
+*/
+}
+
+
+int zip_insn_sets_cc(rtx_insn *insn) {
+ return (get_attr_ccresult(insn)==CCRESULT_SET);
+}
+
+const char *
+zip_cbranchdi_const(rtx comparison,
+ rtx a ATTRIBUTE_UNUSED,
+ rtx b,
+ rtx label ATTRIBUTE_UNUSED) {
+ gcc_assert(CONST_INT_P(b));
+ long value = INTVAL(b);
+
+ // Look into the combine routines to find out why this routine never
+ // gets called.
+
+ switch(GET_CODE(comparison)) {
+ case EQ:
+ if (value < 0)
+ return "CMP\t-1,%H1\t; cbranchdi/# EQ (neg)\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+ else
+ return "CMP\t0,%H1\t; cbranchdi/# EQ\n\tCMP.Z\t%2,%L1\n\tBZ\t%3";
+ case NE:
+ if (value < 0)
+ return "CMP\t-1,%H1\t; cbranchdi/# NE (neg)\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+ else
+ return "CMP\t0,%H1\t; cbranchdi/# NE\n\tCMP.Z\t%2,%L1\n\tBNZ\t%3";
+ case LE:
+ if (value == 0)
+ return "CMP\t0,%H1\t; cbranchdi/# LE 0\n\tBLT\t%3\n\tCMP.Z\t0,%L1\n\tBZ\t%3";
+ else if (value == -1)
+ return "CMP\t0,%H1\t; cbranchdi/# LE -1\n\tBLT\t%3";
+		else if (value < 0) {
+			char	tmp[128];
+			sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# LE (neg)\n"
+				"\tBLT\t%%3\n"
+				"\tBNZ\t.Lcmpdile%%=\n"
+				"\tCMP\t%ld,%%L1\n"
+				"\tBC\t%%3\n"
+				".Lcmpdile%%=:", (value+1l)&0x0ffffffff);
+			return ggc_alloc_string(tmp, -1);
+ } else { //; value > 0
+ char tmp[128];
+ sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LE\n"
+ "\tBLT\t%%3\n"
+ "\tBNZ\t.Lcmple%%=\n"
+ "\tCMP\t%ld,%%L1\n"
+ "\tBC\t%%3\n"
+				".Lcmple%%=:", value+1);
+ return ggc_alloc_string(tmp, -1);
+ }
+ case LT:
+ if (value == 0)
+ return "CMP\t0,%H1\t; cbranchdi/# LT 0\n\tBLT\t%3";
+ else if (value < 0)
+ return "CMP\t-1,%H1\t; cbranchdi/# LT neg\n\tCMP.Z\t%2,%L1\n\tBC\t%3";
+ else
+ return "CMP\t0,%H1\t; cbranchdi/# LT\n"
+ "\tBLT\t%3\n"
+ "\tBNZ\t.Lcmplt%=\n"
+ "\tCMP\t%2,%L1\n"
+ "\tBC\t%3\n"
+ ".Lcmplt%=:";
+ case GT:
+ if (value == 0)
+			return "CMP\t0,%H1\t; cbranchdi/# GT 0\n"
+				"\tBLT\t.Lcmpgt%=\n"
+				"\tBNZ\t%3\n"
+				"\tCMP\t0,%L1\n"
+				"\tBNZ\t%3\n"
+				".Lcmpgt%=:";
+ else if (value == -1)
+ return "CMP\t0,%H1\t; cbranchdi/# GT -1\n"
+ "\tBGE\t%3\n";
+ else if (value < 0) {
+ char tmp[128];
+ sprintf(tmp, "CMP\t-1,%%H1\t; cbranchdi/# GT neg\n"
+ "\tBLT\t.Lcmpgt%%=\n"
+ "\tBNZ\t%%3\n"
+				"\tCMP\t%ld,%%L1\n"
+ "\tBNC\t%%3\n"
+ ".Lcmpgt%%=:", value+1l);
+ return ggc_alloc_string(tmp, -1);
+ } else {
+ char tmp[128];
+ sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# GT\n"
+ "\tBLT\t.Lcmpgt%%=\n"
+ "\tBNZ\t%%3\n"
+ "\tCMP\t%ld,%%L1\n"
+ "\tBNC\t%%3\n"
+ ".Lcmpgt%%=:", value+1l);
+ return ggc_alloc_string(tmp, -1);
+ }
+ case GE:
+ if (value == 0)
+ return "CMP\t0,%H1\t; cbranchdi/# GE 0\n"
+ "\tBLT\t.Lcmpge%=\n"
+ "\tBNZ\t%3\n"
+ "\tCMP\t0,%L1\n"
+ "\tBNC\t%3\n"
+ ".Lcmpge%=:";
+ else if (value == -1)
+ return "CMP\t-1,%H1\t; cbranchdi/# GE -1\n"
+ "\tBLT\t.Lcmpge%=\n"
+ "\tBNZ\t%3\n"
+ "\tCMP\t-1,%L1\n"
+ "\tBZ\t%3\n"
+ ".Lcmpge%=:";
+ else if (value < 0)
+ return "CMP\t-1,%H1\t; cbranchdi/# GE <\n"
+ "\tBLT\t.Lcmpge%=\n"
+ "\tBNZ\t%3\n"
+ "\tCMP\t%2,%L1\n"
+ "\tBNC\t%3\n"
+ ".Lcmpge%=:";
+ else
+ return "CMP\t0,%H1\t; cbranchdi/# GE\n"
+ "\tBLT\t.Lcmpge%=\n"
+ "\tBNZ\t%3\n"
+ "\tCMP\t%2,%L1\n"
+ "\tBNC\t%3\n"
+ ".Lcmpge%=:";
+ case LTU:
+		if (value == 0) { //; Impossible, cannot be < 0 unsigned
+ return "; cbranchdi/# LTU 0 (Impossible!)";
+ } else
+ return "CMP\t0,%H1\t; cbranchdi/#\n\tCMP.Z\t%2,%L1\n\tBC\t%3\n";
+ case LEU:
+ if (value == 0) { //; Only possible if == 0
+			return "CMP\t0,%H1\t; cbranchdi/# LEU 0\n"
+				"\tCMP.Z\t0,%L1\n"
+				"\tBZ\t%3";
+ } else {
+			//; Add one, and LTU works
+ char tmp[128];
+ sprintf(tmp, "CMP\t0,%%H1\t; cbranchdi/# LEU\n"
+ "\tCMP.Z\t%ld,%%L1\n"
+				"\tBC\t%%3\n", value+1);
+ return ggc_alloc_string(tmp, -1);
+ }
+ case GTU:
+ if (value == 0) {
+ //; Equivalent to not equal to zero
+ return "CMP\t0,%H1\t; cbranchdi/# GTU 0\n\tCMP.Z\t0,%L1\n\tBNZ\t%3";
+ } else {
+ char tmp[128];
+ sprintf(tmp,
+ "CMP\t0,%%H1\t; cbranchdi/# GTU\n"
+ "\tBNZ\t%%3\n"
+ "\tCMP\t%ld,%%L1\n"
+ "\tBNC\t%%3\n", value+1);
+ return ggc_alloc_string(tmp, -1);
+ }
+ case GEU:
+ if (value == 0) //; Unsigned, always true
+ return "BRA\t%3\t; cbranchdi/# GEU 0";
+ else
+ return "CMP\t0,%H1\t; cbranchdi/# GEU\n"
+ "\tBNZ\t%3\n"
+ "\tCMP\t%2,%L1\n"
+ "\tBNC\t%3";
+ default:
+ gcc_unreachable();
+ }
+}
+
+const char *
+zip_cbranchdi_reg(rtx comparison,
+ rtx a ATTRIBUTE_UNUSED,
+ rtx b ATTRIBUTE_UNUSED,
+ rtx label ATTRIBUTE_UNUSED) {
+
+ switch(GET_CODE(comparison)) {
+ case EQ:
+ return "CMP\t%H2,%H1\t; cbranchdi/r EQ\n\tCMP.Z\t%L2,%L1\n\tBZ\t%3";
+ case NE:
+ return "CMP\t%H2,%H1\t; cbranchdi/r NE\n\tCMP.Z\t%L2,%L1\n\tBNZ\t%3";
+ case LE:
+ return "CMP\t%H2,%H1\t; cbranchdi/r LE\n"
+ "\tBLT\t%3\n"
+ "\tBNZ\t.Ldi%=\n"
+ "\tCMP\t%L1,%L2\n"
+ "\tBNC\t%3\n"
+ ".Ldi%=:";
+ case GT:
+ return "CMP\t%H1,%H2\t; cbranchdi/r GT\n"
+ "\tBLT\t%3\n"
+ "\tBNZ\t.Ldi%=\n"
+ "\tCMP\t%L1,%L2\n"
+ "\tBC\t%3\n"
+ ".Ldi%=:";
+ case LT:
+ return "CMP\t%H2,%H1\t; cbranchdi/r LT\n"
+ "\tBLT\t%3\n"
+ "\tBNZ\t.Ldi%=\n"
+ "\tCMP\t%L2,%L1\n"
+ "\tBC\t%3\n"
+ ".Ldi%=:";
+ case GE:
+ return "CMP\t%H1,%H2\t; cbranchdi/r GE\n"
+ "\tBLT\t%3\n"
+ "\tBNZ\t.Ldi%=\n"
+ "\tCMP\t%L2,%L1\n"
+ "\tBNC\t%3\n"
+ ".Ldi%=:";
+ case LTU:
+ return "CMP\t%H2,%H1\t; cbranchdi/r LTU\n"
+ "\tCMP.Z\t%L2,%L1\n"
+ "\tBC\t%3\n";
+ case LEU:
+ return "CMP\t%H1,%H2\t; cbranchdi/r LEU\n"
+ "\tBC\t.Ldi%=\n" //; H1 > H2, skip
+ "\tCMP.Z\t%L1,%L2\n" //; (H1==H2) test L1-L2
+ "\tBNC\t%3\n" //; If (L1>=L2)||(H1>H2)
+ ".Ldi%=:";
+ case GTU:
+ return "CMP\t%H1,%H2\t; cbranchdi/r GTU\n"
+ "\tCMP.Z\t%L1,%L2\n"
+ "\tBC\t%3";
+ case GEU:
+ return "CMP\t%H2,%H1\t; cbranchdi/r GEU\n"
+ "\tBC\t.Ldi%=\n"
+ "\tCMP.Z\t%L2,%L1\n"
+ "\tBNC\t%3\n"
+ ".Ldi%=:";
+ default:
+ gcc_unreachable();
+ }
+}
+
+const char *
+zip_cbranchdi(rtx comparison, rtx a, rtx b, rtx label) {
+ if (REG_P(b))
+ return zip_cbranchdi_reg(comparison, a, b, label);
+ else
+ return zip_cbranchdi_const(comparison, a, b, label);
+}
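+
+/* Illustrative expansion (the register/register EQ case above):
+ *
+ *	CMP   %H2,%H1	; compare the high words
+ *	CMP.Z %L2,%L1	; only if they matched, compare the low words
+ *	BZ    %3	; branch when both halves were equal
+ *
+ * The sequence relies on a skipped (predicated) CMP.Z leaving the flags
+ * alone, so the final BZ sees the result of whichever comparison ran last.
+ */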
+
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zipdbg.h gcc-6.2.0-zip/gcc/config/zip/zipdbg.h
--- gcc-6.2.0/gcc/config/zip/zipdbg.h 1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zipdbg.h 2017-02-17 16:47:25.727651898 -0500
@@ -0,0 +1,8 @@
+#define DO_ZIP_DEBUGS
+#ifdef DO_ZIP_DEBUGS
+#include <stdio.h>
+#define ZIP_DEBUG_LINE(STR,RTX) do{fprintf(stderr,"%s:%d/%s\n",__FILE__,__LINE__,STR); zip_debug_rtx(RTX);} while(0)
+extern void zip_debug_rtx(const_rtx);
+#else
+#define ZIP_DEBUG_LINE(STR,RTX)
+#endif
diff -Naur '--exclude=*.swp' gcc-6.2.0/gcc/config/zip/zip-di.md gcc-6.2.0-zip/gcc/config/zip/zip-di.md
--- gcc-6.2.0/gcc/config/zip/zip-di.md 1969-12-31 19:00:00.000000000 -0500
+++ gcc-6.2.0-zip/gcc/config/zip/zip-di.md 2017-04-19 11:38:00.203886955 -0400
@@ -0,0 +1,548 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Filename: zip-di.md
+;;
+;; Project: Zip CPU -- a small, lightweight, RISC CPU soft core
+;;
+;; Purpose: This is the machine description of the Zip CPU as needed by the
+;; GNU compiler collection (GCC). Specifically, this is the
+;; section of the description associated with 64-bit values and
+;; arithmetic.
+;;
+;;
+;; Creator: Dan Gisselquist, Ph.D.
+;; Gisselquist Technology, LLC
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Copyright (C) 2015, Gisselquist Technology, LLC
+;;
+;; This program is free software (firmware): you can redistribute it and/or
+;; modify it under the terms of the GNU General Public License as published
+;; by the Free Software Foundation, either version 3 of the License, or (at
+;; your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; License: GPL, v3, as defined and found on www.gnu.org,
+;; http://www.gnu.org/licenses/gpl.html
+;;
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;;
+;
+;
+;
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ "(ZIP_HAS_DI)"
+ {
+ if (zip_expand_movdi(operands[0], operands[1]))
+ DONE;
+ FAIL;
+ }
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
+;
+;
+;
+(define_insn "movdi_raw"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,Q,r,r")
+ (match_operand:DI 1 "general_operand" "r,r,Q,i"))]
+ "(ZIP_HAS_DI)"
+ {
+ if ((REG_P(operands[0]))&&(REG_P(operands[1])))
+ return "MOV %H1,%H0\t; MOV:DI\n\tMOV %L1,%L0";
+ else if (MEM_P(operands[0])) //; StoreDI
+ return "SW %H1,%0\t; Store:DI\n\tSW %L1,4+%0";
+ else if (MEM_P(operands[1])) { //; LoadDI
+ //; Deal with the case of
+ //; LOD (R0),R0
+ //; LOD 4(R0),R1
+ //; By reversing the order of the operands, to
+ //; LOD 4(R0),R1
+ //; LOD (R0),R0
+		//; This ordering isn't as efficient, so we try to avoid it
+		//;	elsewhere; still, when it does happen, this keeps the
+		//;	generated code correct.
+ rtx address = XEXP(operands[1],0);
+ int hazard = 0;
+ if ( (REG_P(address))
+ &&((REGNO(address))==(REGNO(operands[0]))) )
+ hazard = 1;
+ else if ( (PLUS == (GET_CODE(address)))
+ &&(REGNO(XEXP(address,0))==(REGNO(operands[0]))) )
+ hazard = 1;
+ if (hazard)
+ return "LW 4+%1,%L0\t; Load:DI\n\tLW %1,%H0";
+ else
+ return "LW %1,%H0\t; Load:DI\n\tLW 4+%1,%L0";
+ } else if (CONST_INT_P(operands[1])) {
+ char tmp[128];
+ HOST_WIDE_INT v = INTVAL(operands[1]);
+ sprintf(tmp, "LDI\t0x%08x,%%H0\t; LDI #:DI,%%H0\n\tLDI\t0x%08x,%%L0",
+ (unsigned)(v>>32),
+ (unsigned)(v));
+ return ggc_alloc_string(tmp, -1);
+ } else
+ gcc_unreachable();
+ }
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unchanged")])
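+;
+; Illustrative example of the load hazard handled above (register names and
+; addressing syntax assumed only for the sake of the sketch): a 64-bit load
+; into R0:R1 from an address held in R0 must fetch the low word first,
+;
+;	LW	4+(R0),R1
+;	LW	(R0),R0
+;
+; since the natural high-then-low order would overwrite R0, the base
+; register, before the second load could use it.
+;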
+;
+;
+;
+; ADD
+;
+;
+(define_insn "adddi3" ; Fastest/best instruction always goes first
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "ADD %L2,%L0\n\tADD.C\t1,%H0\n\tADD\t%H2,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
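+;
+; Worked example (illustrative): adding 0x00000000_00000001 to
+; 0x00000001_FFFFFFFF.  The low-word ADD yields 0x00000000 and sets the
+; carry, ADD.C bumps the high word to 0x00000002, and the final ADD of the
+; second operand's (zero) high word leaves 0x00000002_00000000, the correct
+; 64-bit sum.
+;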
+;
+;
+;
+; SUB
+;
+;
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "SUB %L2,%L0\n\tSUB.C\t1,%H0\n\tSUB\t%H2,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; AND
+;
+;
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "AND %L2,%L0\t; AND:DI\n\tAND\t%H2,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; iOR
+;
+;
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "OR %L2,%L0\t; OR:DI\n\tOR\t%H2,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; XOR
+;
+;
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "XOR %L2,%L0\t; XOR:DI\n\tXOR\t%H2,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; NEG
+;
+;
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "0")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "XOR -1,%L0\t; NEG:DI\n\tXOR\t-1,%H0\n\tADD\t1,%L0\n\tADD.C\t1,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+;
+; ABS
+;
+;
+(define_insn "absdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (abs:DI (match_operand:DI 1 "register_operand" "0")))
+ (clobber (match_scratch:SI 2 "=r"))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "CLR %2 ; ABSDI
+ TEST %H0
+ LDILO.LT 1,%2
+ XOR.LT -1,%L0
+ XOR.LT -1,%H0
+ ADD %2,%L0
+ ADD.C 1,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
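+;
+; Illustrative trace: for abs(-1) the operand starts as 0xFFFFFFFF_FFFFFFFF;
+; TEST %H0 finds it negative, so the scratch register becomes 1 and both
+; words are complemented to zero, and the final ADD/ADD.C pair adds the
+; saved 1 back in, yielding 0x00000000_00000001, i.e. 1.
+;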
+;
+;
+; NOT
+;
+;
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "register_operand" "0")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "XOR -1,%L0\t; NOT:DI\n\tXOR\t-1,%H0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; Unsigned min/max
+;
+;
+(define_insn "umindi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (umin:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "CMP %H0,%H2 ; umin:DI
+ CMP.Z %L0,%L2
+ MOV.C %H2,%H0
+ MOV.C %L2,%L0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+(define_insn "umaxdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (umax:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC CC_REG))
+ ]
+ "(ZIP_HAS_DI)"
+ "CMP %H2,%H0 ; umax:DI
+ CMP.Z %L2,%L0
+ MOV.C %H2,%H0
+ MOV.C %L2,%L0"
+ [(set_attr "predicable" "no") (set_attr "ccresult" "unknown")])
+;
+;
+; Multiply
+;
+;
+(define_expand "muldi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (match_dup 1))
+ (clobber (match_dup 2))
+ (clobber (match_scratch:SI 3 "=r"))
+ (clobber (reg:CC CC_REG))])]
+ "(ZIP_HAS_DI)")
+;
+(define_insn "muldi3_raw"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (match_dup 1))
+ (clobber (match_dup 2))
+ (clobber (match_scratch:SI 3 "=r"))
+ (clobber (reg:CC CC_REG))]
+ "(ZIP_HAS_DI)"
+ {
+ int regno[3];
+ regno[0] = REGNO(operands[0]);
+ regno[1] = REGNO(operands[1]);
+ regno[2] = REGNO(operands[2]);
+ //; We need to adjust what we are doing based upon which
+ //; registers are in common. We have a couple of cases:
+ //;
+ if ((regno[0] == regno[1])&&(regno[0] == regno[2])) {
+ //; RA = RA * RA
+ //;
+ //; (H0:L0) = (H0:L0) * (H0:L0)
+ //; (H0:L0) = (H0*2^32 + L0) * (H0 * 2^32 + L0)
+ //; (H0:L0) = (H0*H0*2^64 + (H0*L0+L0*H0)*2^32 + L0 *L0)
+		//;	= (H0*L0+L0*H0):(L0*L0)
+ //; :L0 = LOPART(L0 * L0)
+ //; H0 = HIPART(L0 * L0)
+ //; H0 += LOPART(H0 * L0)
+ //; H0 += LOPART(L0 * H0)
+ //;
+ //; Rx = L0
+ //; H0 *= L0 ( = LOPART( HI * LO )