author     Alexander Kabaev <kan@FreeBSD.org>  2005-06-03 03:28:44 +0000
committer  Alexander Kabaev <kan@FreeBSD.org>  2005-06-03 03:28:44 +0000
commit     d51085f37e16c95804f0fdabb7b1226e4b4e7de9 (patch)
tree       f0dc8ad34f9fcaf27052e24e893a4284b5fee6e9
parent     1689e31de64dad8b8826dc924a82c7ba59a54bf4 (diff)
download   src-d51085f37e16c95804f0fdabb7b1226e4b4e7de9.tar.gz
           src-d51085f37e16c95804f0fdabb7b1226e4b4e7de9.zip
GCC 3.4.4 release.
Notes:
    svn path=/vendor/gcc/dist/; revision=146895
-rw-r--r--  contrib/gcc/ChangeLog | 2322
-rw-r--r--  contrib/gcc/Makefile.in | 53
-rw-r--r--  contrib/gcc/attribs.c | 2
-rw-r--r--  contrib/gcc/builtins.c | 12
-rw-r--r--  contrib/gcc/c-common.c | 94
-rw-r--r--  contrib/gcc/c-decl.c | 38
-rw-r--r--  contrib/gcc/c-format.c | 80
-rw-r--r--  contrib/gcc/c-opts.c | 38
-rw-r--r--  contrib/gcc/c-parse.in | 2
-rw-r--r--  contrib/gcc/c-ppoutput.c | 2
-rw-r--r--  contrib/gcc/c-semantics.c | 13
-rw-r--r--  contrib/gcc/c-typeck.c | 68
-rw-r--r--  contrib/gcc/c.opt | 8
-rw-r--r--  contrib/gcc/calls.c | 10
-rw-r--r--  contrib/gcc/cfglayout.c | 12
-rw-r--r--  contrib/gcc/cfgrtl.c | 30
-rw-r--r--  contrib/gcc/collect2.c | 15
-rw-r--r--  contrib/gcc/combine.c | 178
-rw-r--r--  contrib/gcc/common.opt | 2
-rw-r--r--  contrib/gcc/config.gcc | 170
-rw-r--r--  contrib/gcc/config.in | 15
-rw-r--r--  contrib/gcc/config/alpha/alpha.c | 139
-rw-r--r--  contrib/gcc/config/alpha/alpha.h | 2
-rw-r--r--  contrib/gcc/config/alpha/alpha.md | 124
-rw-r--r--  contrib/gcc/config/alpha/qrnnd.asm | 4
-rw-r--r--  contrib/gcc/config/alpha/t-osf4 | 6
-rw-r--r--  contrib/gcc/config/arm/arm-protos.h | 4
-rw-r--r--  contrib/gcc/config/arm/arm.c | 40
-rw-r--r--  contrib/gcc/config/arm/arm.h | 2
-rw-r--r--  contrib/gcc/config/arm/arm.md | 20
-rw-r--r--  contrib/gcc/config/arm/t-netbsd | 6
-rw-r--r--  contrib/gcc/config/arm/t-rtems | 10
-rw-r--r--  contrib/gcc/config/darwin-protos.h | 2
-rw-r--r--  contrib/gcc/config/darwin.c | 35
-rw-r--r--  contrib/gcc/config/darwin.h | 14
-rw-r--r--  contrib/gcc/config/freebsd-spec.h | 26
-rw-r--r--  contrib/gcc/config/i386/cygwin1.c | 8
-rw-r--r--  contrib/gcc/config/i386/darwin.h | 4
-rw-r--r--  contrib/gcc/config/i386/emmintrin.h | 2
-rw-r--r--  contrib/gcc/config/i386/freebsd.h | 11
-rw-r--r--  contrib/gcc/config/i386/gthr-win32.c | 32
-rw-r--r--  contrib/gcc/config/i386/i386-modes.def | 2
-rw-r--r--  contrib/gcc/config/i386/i386-protos.h | 1
-rw-r--r--  contrib/gcc/config/i386/i386.c | 132
-rw-r--r--  contrib/gcc/config/i386/i386.h | 14
-rw-r--r--  contrib/gcc/config/i386/i386.md | 1068
-rw-r--r--  contrib/gcc/config/i386/t-rtems-i386 | 12
-rw-r--r--  contrib/gcc/config/i386/xmmintrin.h | 4
-rw-r--r--  contrib/gcc/config/ia64/ia64.c | 98
-rw-r--r--  contrib/gcc/config/ia64/t-glibc | 4
-rw-r--r--  contrib/gcc/config/ia64/t-glibc-libunwind | 4
-rw-r--r--  contrib/gcc/config/ia64/t-hpux | 2
-rw-r--r--  contrib/gcc/config/ia64/unwind-ia64.c | 23
-rw-r--r--  contrib/gcc/config/ia64/unwind-ia64.h | 3
-rw-r--r--  contrib/gcc/config/rs6000/aix.h | 18
-rw-r--r--  contrib/gcc/config/rs6000/aix41.h | 4
-rw-r--r--  contrib/gcc/config/rs6000/aix43.h | 5
-rw-r--r--  contrib/gcc/config/rs6000/aix52.h | 4
-rw-r--r--  contrib/gcc/config/rs6000/altivec.h | 11868
-rw-r--r--  contrib/gcc/config/rs6000/altivec.md | 4
-rw-r--r--  contrib/gcc/config/rs6000/beos.h | 12
-rw-r--r--  contrib/gcc/config/rs6000/darwin-ldouble-shared.c | 2
-rw-r--r--  contrib/gcc/config/rs6000/darwin-ldouble.c | 39
-rw-r--r--  contrib/gcc/config/rs6000/darwin.h | 7
-rw-r--r--  contrib/gcc/config/rs6000/eabi.asm | 2
-rw-r--r--  contrib/gcc/config/rs6000/libgcc-ppc64.ver | 10
-rw-r--r--  contrib/gcc/config/rs6000/linux-unwind.h | 322
-rw-r--r--  contrib/gcc/config/rs6000/linux.h | 98
-rw-r--r--  contrib/gcc/config/rs6000/linux64.h | 195
-rw-r--r--  contrib/gcc/config/rs6000/rs6000-c.c | 14
-rw-r--r--  contrib/gcc/config/rs6000/rs6000-protos.h | 2
-rw-r--r--  contrib/gcc/config/rs6000/rs6000.c | 781
-rw-r--r--  contrib/gcc/config/rs6000/rs6000.h | 16
-rw-r--r--  contrib/gcc/config/rs6000/rs6000.md | 212
-rw-r--r--  contrib/gcc/config/rs6000/rtems.h | 21
-rw-r--r--  contrib/gcc/config/rs6000/spe.h | 19
-rw-r--r--  contrib/gcc/config/rs6000/spe.md | 4
-rw-r--r--  contrib/gcc/config/rs6000/sysv4.h | 9
-rw-r--r--  contrib/gcc/config/rs6000/t-aix43 | 5
-rw-r--r--  contrib/gcc/config/rs6000/t-aix52 | 5
-rw-r--r--  contrib/gcc/config/rs6000/t-linux64 | 5
-rw-r--r--  contrib/gcc/config/rs6000/t-newas | 3
-rw-r--r--  contrib/gcc/config/rs6000/t-rtems | 1
-rw-r--r--  contrib/gcc/config/s390/s390.md | 14
-rw-r--r--  contrib/gcc/config/s390/tpf.h | 3
-rw-r--r--  contrib/gcc/config/sparc/sparc.c | 156
-rw-r--r--  contrib/gcc/config/sparc/sparc.md | 1
-rw-r--r--  contrib/gcc/config/sparc/t-elf | 4
-rw-r--r--  contrib/gcc/config/t-libunwind | 9
-rw-r--r--  contrib/gcc/config/t-libunwind-elf | 30
-rw-r--r--  contrib/gcc/config/t-slibgcc-darwin | 6
-rw-r--r--  contrib/gcc/config/t-slibgcc-elf-ver | 6
-rw-r--r--  contrib/gcc/config/t-slibgcc-sld | 6
-rwxr-xr-x  contrib/gcc/configure | 1766
-rw-r--r--  contrib/gcc/configure.ac | 48
-rw-r--r--  contrib/gcc/cp-demangle.c | 15
-rw-r--r--  contrib/gcc/cp/ChangeLog | 620
-rw-r--r--  contrib/gcc/cp/Make-lang.in | 3
-rw-r--r--  contrib/gcc/cp/call.c | 50
-rw-r--r--  contrib/gcc/cp/class.c | 225
-rw-r--r--  contrib/gcc/cp/cp-tree.h | 26
-rw-r--r--  contrib/gcc/cp/cvt.c | 5
-rw-r--r--  contrib/gcc/cp/cxx-pretty-print.c | 4
-rw-r--r--  contrib/gcc/cp/decl.c | 242
-rw-r--r--  contrib/gcc/cp/decl2.c | 10
-rw-r--r--  contrib/gcc/cp/error.c | 12
-rw-r--r--  contrib/gcc/cp/except.c | 24
-rw-r--r--  contrib/gcc/cp/g++spec.c | 3
-rw-r--r--  contrib/gcc/cp/init.c | 63
-rw-r--r--  contrib/gcc/cp/mangle.c | 13
-rw-r--r--  contrib/gcc/cp/method.c | 59
-rw-r--r--  contrib/gcc/cp/name-lookup.c | 45
-rw-r--r--  contrib/gcc/cp/parser.c | 419
-rw-r--r--  contrib/gcc/cp/pt.c | 261
-rw-r--r--  contrib/gcc/cp/search.c | 52
-rw-r--r--  contrib/gcc/cp/semantics.c | 107
-rw-r--r--  contrib/gcc/cp/tree.c | 35
-rw-r--r--  contrib/gcc/cp/typeck.c | 30
-rw-r--r--  contrib/gcc/cp/typeck2.c | 32
-rw-r--r--  contrib/gcc/cppfiles.c | 49
-rw-r--r--  contrib/gcc/cpplib.c | 8
-rw-r--r--  contrib/gcc/cppmacro.c | 12
-rw-r--r--  contrib/gcc/crtstuff.c | 15
-rw-r--r--  contrib/gcc/cse.c | 42
-rw-r--r--  contrib/gcc/cselib.c | 35
-rw-r--r--  contrib/gcc/cselib.h | 1
-rw-r--r--  contrib/gcc/dbxout.c | 182
-rw-r--r--  contrib/gcc/defaults.h | 6
-rw-r--r--  contrib/gcc/doc/extend.texi | 2007
-rw-r--r--  contrib/gcc/doc/include/gcc-common.texi | 2
-rw-r--r--  contrib/gcc/doc/invoke.texi | 113
-rw-r--r--  contrib/gcc/doc/md.texi | 4
-rw-r--r--  contrib/gcc/doc/tm.texi | 96
-rw-r--r--  contrib/gcc/doc/trouble.texi | 2
-rw-r--r--  contrib/gcc/dojump.c | 112
-rw-r--r--  contrib/gcc/dwarf2out.c | 80
-rw-r--r--  contrib/gcc/emit-rtl.c | 33
-rw-r--r--  contrib/gcc/explow.c | 4
-rw-r--r--  contrib/gcc/expmed.c | 4
-rw-r--r--  contrib/gcc/expr.c | 7
-rw-r--r--  contrib/gcc/expr.h | 3
-rw-r--r--  contrib/gcc/f/ChangeLog | 38
-rw-r--r--  contrib/gcc/f/bld.c | 2
-rw-r--r--  contrib/gcc/f/malloc.c | 25
-rw-r--r--  contrib/gcc/f/news.texi | 4
-rw-r--r--  contrib/gcc/flow.c | 37
-rw-r--r--  contrib/gcc/fold-const.c | 47
-rw-r--r--  contrib/gcc/function.c | 81
-rw-r--r--  contrib/gcc/gcc.c | 18
-rw-r--r--  contrib/gcc/gcov-io.h | 56
-rw-r--r--  contrib/gcc/gcse.c | 36
-rw-r--r--  contrib/gcc/ginclude/stddef.h | 6
-rw-r--r--  contrib/gcc/gthr-win32.h | 63
-rw-r--r--  contrib/gcc/hooks.c | 7
-rw-r--r--  contrib/gcc/hooks.h | 2
-rw-r--r--  contrib/gcc/ifcvt.c | 26
-rw-r--r--  contrib/gcc/jump.c | 17
-rw-r--r--  contrib/gcc/libgcc-darwin.ver | 10
-rw-r--r--  contrib/gcc/libgcc-std.ver | 11
-rw-r--r--  contrib/gcc/libgcc2.c | 97
-rw-r--r--  contrib/gcc/libgcc2.h | 50
-rw-r--r--  contrib/gcc/loop-unroll.c | 17
-rw-r--r--  contrib/gcc/loop.c | 353
-rw-r--r--  contrib/gcc/mkheaders.in | 2
-rw-r--r--  contrib/gcc/mklibgcc.in | 184
-rw-r--r--  contrib/gcc/objc/objc-act.c | 1
-rw-r--r--  contrib/gcc/params.def | 9
-rw-r--r--  contrib/gcc/params.h | 2
-rw-r--r--  contrib/gcc/postreload.c | 13
-rw-r--r--  contrib/gcc/predict.c | 5
-rw-r--r--  contrib/gcc/real.c | 7
-rw-r--r--  contrib/gcc/reg-stack.c | 15
-rw-r--r--  contrib/gcc/regclass.c | 155
-rw-r--r--  contrib/gcc/regrename.c | 4
-rw-r--r--  contrib/gcc/regs.h | 2
-rw-r--r--  contrib/gcc/reload.c | 28
-rw-r--r--  contrib/gcc/reload1.c | 33
-rw-r--r--  contrib/gcc/rtl.h | 2
-rw-r--r--  contrib/gcc/sibcall.c | 4
-rw-r--r--  contrib/gcc/simplify-rtx.c | 139
-rw-r--r--  contrib/gcc/target-def.h | 2
-rw-r--r--  contrib/gcc/target.h | 5
-rw-r--r--  contrib/gcc/toplev.c | 13
-rw-r--r--  contrib/gcc/tree-inline.c | 10
-rw-r--r--  contrib/gcc/tree.c | 49
-rw-r--r--  contrib/gcc/tree.h | 3
-rw-r--r--  contrib/gcc/unwind-compat.c | 206
-rw-r--r--  contrib/gcc/unwind-compat.h | 35
-rw-r--r--  contrib/gcc/unwind-dw2-fde-compat.c | 46
-rw-r--r--  contrib/gcc/unwind-dw2-fde-glibc.c | 5
-rw-r--r--  contrib/gcc/unwind-dw2.c | 30
-rw-r--r--  contrib/gcc/varasm.c | 201
-rw-r--r--  contrib/gcc/version.c | 2
193 files changed, 19397 insertions, 8748 deletions
diff --git a/contrib/gcc/ChangeLog b/contrib/gcc/ChangeLog
index efcecf7bd7ab..fe9363f7a23d 100644
--- a/contrib/gcc/ChangeLog
+++ b/contrib/gcc/ChangeLog
@@ -1,3 +1,2325 @@
+2005-05-19 Release Manager
+
+ * GCC 3.4.4 released.
+
+2005-05-11 Mark Mitchell <mark@codesourcery.com>
+
+ Revert:
+ 2005-05-10 H.J. Lu <hongjiu.lu@intel.com>
+ Backport from mainline
+ 2004-02-12 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+ * function.c (fixup_var_refs): Save volatile_ok and set to 1.
+ * expr.c (emit_block_move_via_movstr): Save and restore
+ volatile_ok.
+ 2005-05-09 Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com>
+ Backport from mainline
+ 2004-02-12 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+ * emit-rtl.c (set_mem_attributes_minus_bitpos): Don't kill previous
+ MEM_VOLATILE in REF.
+
+2005-05-11 Ben Elliston <bje@au.ibm.com>
+
+ * dwarf2out.c: Revert my 2005-05-10 patch.
+
+2005-05-10 H.J. Lu <hongjiu.lu@intel.com>
+
+ Backport from mainline
+ 2004-02-12 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+ * function.c (fixup_var_refs): Save volatile_ok and set to 1.
+ * expr.c (emit_block_move_via_movstr): Save and restore
+ volatile_ok.
+
+2005-05-10 Jakub Jelinek <jakub@redhat.com>
+
+ * config/i386/i386.md (sse_mov?fcc* splitters): Add mode to
+ IF_THEN_ELSE, remove mode from MATCH_OPERATOR. Fix a typo.
+
+2005-05-09 Grigory Tolstolytkin <gtolstolytkin@ru.mvista.com>
+
+ Backport from mainline
+ 2004-02-12 Richard Kenner <kenner@vlsi1.ultra.nyu.edu>
+ * emit-rtl.c (set_mem_attributes_minus_bitpos): Don't kill previous
+ MEM_VOLATILE in REF.
+
+2005-05-10 Ben Elliston <bje@au.ibm.com>
+
+ PR debug/16676
+ * dwarf2out.c (dwarf2out_decl): Always set context_die to NULL,
+ even when debug_info_level is DINFO_LEVEL_TERSE.
+
+2005-05-08 Stephane Carrez <stcarrez@nerim.fr>
+
+ * config/m68hc11/m68hc11.c (m68hc11_z_replacement): Use emit_insn_after
+ when adding the save Z instruction so that it is part of the good BB.
+ (reg_or_some_mem_operand): Do not allow the 68HC12 address indirect
+ addressing mode as it is not supported by bset and bclr.
+ (m68hc11_gen_movhi): Fix invalid generation of indexed indirect
+ addressing with movw.
+ (m68hc11_gen_movqi): Use pula and pulb instead of lda and ldb for
+ 68HC12.
+ * config/m68hc11/m68hc11.md ("movhi_const0"): Use this pattern only
+ for 68HC11.
+ ("*movhi_68hc12"): Handle movhi_const0.
+ ("*subhi3", "subqi3"): Use general_operand for operand 1.
+ ("*subhi3_zext"): Likewise.
+
+2005-05-08 Stephane Carrez <stcarrez@nerim.fr>
+
+ PR target/19051
+ * config/m68hc11/m68hc11.md ("mulqi3"): Use general_operand for operand
+ 1 and fix constraints.
+ ("mulqihi3"): Use general_operand for operand 2.
+
+2005-05-08 Richard Sandiford <rsandifo@redhat.com>
+
+ PR target/21416
+ * config/mips/mips.c (mips_emit_compare): Don't reverse UNGE and UNGT
+ comparisons.
+ * config/mips/mips.md (sungt_df, sunge_df, sungt_sf, sunge_sf): New
+ patterns.
+
+2005-05-08 Stephane Carrez <stcarrez@nerim.fr>
+
+ PR target/16925
+ * config/m68hc11/m68hc11.c (m68hc11_gen_highpart): Handle split of
+ 64-bit constants on 64-bit hosts.
+ (m68hc11_split_logical): Simplify.
+ (m68hc11_split_move): Likewise.
+
+2005-05-06 Bruce Korb <bkorb@gnu.org>
+ Joseph S. Myers <joseph@codesourcery.com>
+
+ * fixinc/inclhack.def: Correct backslashes
+ * fixinc/fixincl.x: regen
+
+2005-05-02 Janis Johnson <janis187@us.ibm.com>
+
+ PR 19985
+ * gcov-io.h: Declare gcov external functions hidden.
+
+2005-05-01 Gerald Pfeifer <gerald@pfeifer.com>
+
+ * config/freebsd-spec.h (FBSD_CPP_SPEC): Revert last change.
+
+2005-05-01 Gerald Pfeifer <gerald@pfeifer.com>
+
+ Backport from mainline
+ * doc/install.texi (Specific): Avoid using asterisks in @anchor
+ names related to target triplets.
+ Remove i?86-*-esix from platform directory.
+ Remove powerpc-*-eabiaix from platform directory.
+
+2005-05-01 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR bootstrap/20633
+ * config/freebsd-spec.h (FBSD_CPP_SPEC): Add %(cpp_arch).
+
+2005-05-01 Gerald Pfeifer <gerald@pfeifer.com>
+
+ * doc/install.texi (Specific): Omit dots in the @anchors names
+ for i?86-*-sco3.2v5* and sparc-sun-solaris2.7.
+ Omit underscores for x86_64-*-* and the "all ELF targets" entry.
+
+2005-05-01 Joseph S. Myers <joseph@codesourcery.com>
+
+ PR c/21213
+ * c-decl.c (finish_struct): Don't dereference NULL TYPE_FIELDS of
+ transparent union.
+
+2005-05-01 Joseph S. Myers <joseph@codesourcery.com>
+
+ PR c/20740
+ * c-format.c (init_dynamic_asm_fprintf_info): Give errors, not
+ aborts, if __gcc_host_wide_int__ is not properly defined.
+ (init_dynamic_diag_info): Give errors, not aborts, if location_t,
+ tree or __gcc_host_wide_int__ are not properly defined.
+
+2005-05-01 Joseph S. Myers <joseph@codesourcery.com>
+
+ PR c/11459
+ PR c/18502
+ * gcc.c (cpp_unique_options): Remove %{trigraphs}.
+ (cpp_options, cc1_options): Change %{std*} %{ansi} to
+ %{std*&ansi&trigraphs}.
+
+2005-04-29 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/21098
+ * config/rs6000/rs6000.c (rs6000_elf_end_indicate_exec_stack): New.
+ * config/rs6000/linux64.h (TARGET_ASM_FILE_END): Use the above.
+
+2005-04-25 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config.gcc (avr-*-*): Remove redundant "case".
+
+2005-04-25 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ PR target/17822
+ * config/avr/t-avr (AR_FOR_TARGET,RANLIB_FOR_TARGET): Remove.
+
+2005-04-25 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ PR target/17824
+ * config/c4x/c4x.h (ASM_PROG, LD_PROG): Remove.
+
+2005-04-22 David Edelsohn <edelsohn@gnu.org>
+
+ Backport from mainline.
+ * config/rs6000/rs6000.c (rs6000_init_libfuncs): Set TFmode
+ optabs to xlq names if TARGET_XL_COMPAT.
+
+2005-04-22 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * doc/invoke.texi (SPARC options): Document that -mapp-regs
+ is turned off by default on Solaris.
+
+2005-04-15 Dave Korn <dave.korn@artimi.com>
+
+ * gcc.c (default_compilers): Clarify obscure error message when
+ reading from standard input.
+
+2005-04-15 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * config/sparc/sparc.c (legitimate_address_p): Use TARGET_ARCH32.
+
+2005-04-11 David Edelsohn <edelsohn@gnu.org>
+
+ Backport from mainline:
+ * config/rs6000/aix52.h (atoll): Declare.
+
+2005-04-08 Ulrich Weigand <uweigand@de.ibm.com>
+
+ Backport from mainline:
+ * config/s390/tpf.h (ASM_SPEC): Define.
+
+2005-04-06 Dale Johannesen <dalej@apple.com>
+
+ PR middle-end/19225
+ * calls.c (expand_call): Flush pending deferrals before
+ throwing call.
+
+2005-04-06 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR target/17245
+ * config/sparc/sparc.c (legitimate_address_p): Remove 'imm2'.
+ Revert 2004-10-08 patch. Reject TFmode LO_SUM in 32-bit mode.
+
+2005-04-05 James E. Wilson <wilson@specifixinc.com>
+
+ PR target/20670
+ * unwind-ia64.c (uw_install_context): Add missing load of r27.
+
+2005-04-04 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * doc/invoke.texi (SPARC options): Document that
+ -mlittle-endian is not supported on Linux either.
+
+2005-03-25 Gabriel Dos Reis <gdr@integrable-solutions.net>
+
+ PR c++/18644
+ * doc/invoke.texi (-Wsynth): Don't document, as it now is void of
+ semantics.
+
+2005-03-31 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/20611
+ * config/rs6000/rs6000.md (load_toc_v4_PIC_1b): Remove inline
+ label operand. Remove (use (unspec..)). Don't emit a label on
+ the offset word.
+ * config/rs6000/rs6000.c (rs6000_legitimize_tls_address): Don't
+ generate inline label for load_toc_v4_PIC_1b.
+ (rs6000_emit_load_toc_table): Likewise.
+
+2005-03-31 Alan Modra <amodra@bigpond.net.au>
+
+ * config.gcc (cpu_is_64bit): Set for 64-bit powerpc cpus.
+ (powerpc64-*-linux*): Use it. Rearrange tm_file assignment.
+ (powerpc-*-linux*): Build a biarch compiler when --enable-targets
+ is given with "powerpc64*" or "all", or when --with-cpu chooses
+ a 64-bit cpu.
+
+2005-03-30 Alan Modra <amodra@bigpond.net.au>
+
+ * doc/install.texi: Update binutils requirement for powerpc*-linux.
+
+2005-03-25 John David Anglin <dave.anglin@nrc-crnc.gc.ca>
+
+ PR target/15491
+ * vax.c (vax_rtx_costs_1): Merge with vax_rtx_costs.
+ (vax_rtx_costs): Return false when passed unsupported rtx's. Handle
+ FLOAT_EXTEND, FLOAT_TRUNCATE and TRUNCATE. Fix costs for POST_INC,
+ PRE_DEC, NEG and NOT.
+
+2005-03-23 Uros Bizjak <uros@kss-loka.si>
+
+ * simplify-rtx.c (simplify_unary_operation) <NOT>: Add missing break
+ in code that deals with logicals on floats.
+
+2005-03-22 Uros Bizjak <uros@kss-loka.si>
+
+ PR target/14981
+ Backport from mainline
+ 2004-02-18 Jan Hubicka <jh@suse.cz>
+ * simplify-rtx.c (simplify_unary_operation): Deal with logicals on
+ floats.
+ (simplify_binary_operation): Deal with logicals on floats.
+ * i386.md (SSE fabs splitters): Emit new patterns.
+ (SSE cmov splitters): Likewise.
+ (sse_andv4sf3, sse_nandv4sf3, sse_iorv4sf3, sse_xorv4sf3
+ (sse_andv2df3, sse_nandv2df3, sse_iorv2df3, sse_xorv2df3): Do not use
+ subregs.
+ (sse_andsf3, sse_nandsf3, sse_xorsf3): Kill.
+ (sse_anddf3, sse_nanddf3, sse_xordf3): Kill.
+
+2005-03-20 Marek Michalkiewicz <marekm@amelek.gda.pl>
+
+ PR target/18551
+ * config/avr/avr.c (avr_output_function_prologue): Do not use
+ current_function_name() in a label, use a local label instead.
+
+2005-03-19 Joseph S. Myers <joseph@codesourcery.com>
+
+ * c.opt (ansi, std=iso9899:1990, std=iso9899:1999,
+ std=iso9899:199x): Correct descriptions.
+
+2005-03-19 Andy Hutchinson <HutchinsonAndy@netscape.net>
+
+ PR target/18251
+ * config/avr/avr.md (movstrhi): Rewrite as RTL loop.
+ (*movstrqi_insn): Delete.
+ (*movstrhi): Delete.
+
+2005-03-17 Richard Sandiford <rsandifo@redhat.com>
+
+ PR rtl-optimization/19683
+ * reload1.c (choose_reload_regs): Pass the number of bits, not the
+ number of bytes, to smallest_int_for_mode. Fix arguments to
+ REG_CANNOT_CHANGE_MODE_P.
+
+2005-03-17 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/linux-unwind.h: New file backported from gcc-4.0.
+ * config/rs6000/linux.h: #include linux-unwind.h in place of all
+ unwind support in this file.
+ * config/rs6000/linux64.h: Likewise.
+
+2005-03-16 Roger Sayle <roger@eyesopen.com>
+
+ PR rtl-optimization/17825
+ Backport from mainline
+ 2004-11-27 Jakub Jelinek <jakub@redhat.com>
+ * combine.c (subst): Ignore STRICT_LOW_PART no matter if REG_P (new)
+ or not.
+
+2005-03-16 Roger Sayle <roger@eyesopen.com>
+
+ PR target/18371
+ Backport from mainline
+ 2004-12-19 Steven Bosscher <stevenb@suse.de>
+ * config/i386/i386.c (ix86_split_to_parts): Use an array with
+ four elements for decoding a CONST_DOUBLE on 64 bits targets.
+
+2005-03-14 Alan Modra <amodra@bigpond.net.au>
+
+ * config.gcc: Remove excess indentation.
+ (powerpc*-*-*, rs6000-*-*): Accept --with-cpu/tune power5.
+
+2005-03-13 Marek Michalkiewicz <marekm@amelek.gda.pl>
+
+ PR target/20288
+ * config/avr/avr.c (print_operand): Add 'p' and 'r'.
+ (out_movhi_r_mr): Read low byte of volatile MEM first.
+ (out_movhi_mr_r): Write high byte of volatile MEM first.
+
+2005-03-10 Aldy Hernandez <aldyh@redhat.com>
+
+ * doc/invoke.texi: Add 8540 to list of cpus in rs6000 cpu section.
+
+2005-03-08 James E Wilson <wilson@specifixinc.com>
+
+ Backport from mainline
+ 2004-04-13 James E Wilson <wilson@specifixinc.com>
+ PR middle-end/20364
+ * c-opts.c (c_common_post_options): If this_input_filename is NULL,
+ increment errorcount and return false instead of true.
+
+ Backport from mainline
+ 2005-02-21 James E Wilson <wilson@specifixinc.com>
+ * toplev.c (backend_init): Don't call init_adjust_machine_modes here.
+ (do_compile): Do call it here.
+
+2005-03-07 David Billinghurst <David.Billinghurst@riotinto.com>
+
+ * config/i386/cygwin1.c (mingw_scan): Use xstrdup in calls to putenv.
+
+2005-03-03 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/20277
+ * config/rs6000/rs6000.c (rs6000_override_options): Don't allow
+ -mcpu to override any other explicitly given flags.
+
+2005-03-02 Mark Mitchell <mark@codesourcery.com>
+
+ PR c++/19916
+ * varasm.c (initializer_constant_valid_p): Allow conversions
+ between OFFSET_TYPEs. Tidy.
+
+2005-02-28 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR target/19819
+ * pa.h (GO_IF_LEGITIMATE_ADDRESS): Allow hard registers during
+ and after reload in REG+REG indexed addresses without REG_POINTER
+ set in the base and not set in the index.
+
+2005-02-27 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR rtl-optimization/17728
+ * pa.md (mulsi3, divsi3, udivsi3, modsi3): Change predicate for
+ operand 0 from general_operand to move_dest_operand.
+
+2005-02-26 Paolo Carlini <pcarlini@suse.de>
+
+ * doc/extend.texi (Declaring Attributes of Functions)<noreturn>:
+ Clarify that the alternative way doesn't work in GNU C++.
+
+2005-02-25 David Edelsohn <edelsohn@gnu.org>
+
+ Backport from mainline:
+ 2005-02-24 David Edelsohn <edelsohn@gnu.org>
+ PR target/19019
+ * reload.c (operands_match_p): Only increment register number for
+ SCALAR_INT_MODE_P modes in multiple hard registers.
+ * config/rs6000/rs6000.md (trunctfdf2): Remove register constraints.
+ Fix formatting.
+
+2005-02-24 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/19019
+ * Makefile.in (LIB2FUNCS_SHARED_EXTRA, LIB2ADD_SH): New.
+ (libgcc.mk): Depend on $(LIB2ADD_SH), pass LIB2ADD_SH to mklibgcc.
+ (LIBGCC_DEPS): Add $(LIB2ADD_SH).
+ * mklibgcc.in: Handle LIB2ADD_SH.
+ * config/rs6000/t-linux64 (LIB2FUNCS_EXTRA): Remove darwin-ldouble.c.
+ (LIB2FUNCS_STATIC_EXTRA, LIB2FUNCS_SHARED_EXTRA): Set.
+ * config/rs6000/darwin-ldouble.c: Protect .symver asm also with
+ defined IN_LIBGCC2_S.
+ * config/rs6000/darwin-ldouble-shared.c: New file.
+
+2005-02-23 Michael Beach <michaelb@ieee.org>
+
+ PR target/20159
+ * config/sparc/t-elf (startup files): Assemble with CPP.
+
+2005-02-17 Gerald Pfeifer <gerald@pfeifer.com>
+
+ * doc/install.texi (Specific): Update link for Darwin-specific
+ tool binary site.
+
+2005-02-16 David Edelsohn <edelsohn@gnu.org>
+
+ PR target/19019
+ Backport from mainline:
+ 2005-02-16 David Edelsohn <edelsohn@gnu.org>
+ * config/rs6000/t-aix43 (SHLIB_MAPFILES): Add libgcc-ppc64.ver.
+ * config/rs6000/t-aix52 (SHLIB_MAPFILES): Same.
+ 2005-02-15 David Edelsohn <edelsohn@gnu.org>
+ Alan Modra <amodra@bigpond.net.au>
+ * config/rs6000/darwin-ldouble.c (_xlqadd): Rename to __gcc_qadd.
+ (_xlqsub): Rename to __gcc_qsub.
+ (_xlqmul): Rename to __gcc_qmul.
+ (_xlqdiv): Rename to __gcc_qdiv.
+ Provide versioned symbol aliases with old names.
+ * config/rs6000/libgcc-ppc64.ver: Rename symbols.
+ * config/rs6000/rs6000.c (rs6000_init_libfuncs): Rename symbols.
+ * config/rs6000/t-aix43 (LIB2FUNCS_EXTRA): New.
+ * config/rs6000/t-aix52 (LIB2FUNCS_EXTRA): New.
+ * config/rs6000/t-newas (LIB2FUNCS_EXTRA): New.
+ 2005-02-14 David Edelsohn <edelsohn@gnu.org>
+ * config/rs6000/rs6000.md (trunctfdf2): Change to define_expand.
+ (trunctfdf2_internal1): New.
+ (trunctfdf2_internal2): Renamed from trunctfdf2.
+ 2005-02-13 David Edelsohn <edelsohn@gnu.org>
+ * config/rs6000/aix.h ({TARGET,MASK}_XL_CALL): Rename to
+ {TARGET,MASK}_XL_COMPAT.
+ (SUBTARGET_SWITCHES): Rename xl-call to xl-compat. Use
+ MASK_XL_COMPAT.
+ * config/rs6000/beos.h ({TARGET,MASK}_XL_CALL): Remove.
+ * config/rs6000/rs6000.c (function_arg): Change TARGET_XL_CALL to
+ TARGET_XL_COMPAT.
+ (rs6000_arg_partial_bytes): Same.
+ (rs6000_generate_compare): Generate PARALLEL for compare if TFmode
+ and XL compatibility enabled.
+ * config/rs6000/rs6000.h (TARGET_XL_CALL): Rename to TARGET_XL_COMPAT.
+ * config/rs6000/rs6000.md (cmptf_internal1): Add !TARGET_XL_COMPAT
+ test to final condition.
+ (cmptf_internal2): New.
+ * doc/invoke.texi (RS/6000 Subtarget Options): Change xl-call to
+ xl-compat. Add TFmode information to description.
+
+2005-02-16 Eric Christopher <echristo@redhat.com>
+
+ PR preprocessor/19077
+ * cppmacro.c (cpp_macro_definition): Move handling of whitespace
+ to PREV_WHITE conditional. Remove overloading of len
+ variable.
+
+2005-02-16 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR target/19715
+ * config.gcc (sparc-*-elf*): Include sparc/sol2-gld.h.
+ (sparc-*-rtems*): Likewise.
+ (sparclite-*-elf*): Likewise.
+ (sparc86x-*-elf*): Likewise.
+ (sparc64-*-elf*): Likewise.
+
+2005-02-11 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR middle-end/19697
+ 2005-01-30 Roger Sayle <roger@eyesopen.com>
+ * config/pa/pa.md (anddi3, iordi3): On HPPA64, disallow an integer
+ constant as the second operand and a register as the third.
+
+2005-02-11 Jakub Jelinek <jakub@redhat.com>
+
+ PR c++/19666
+ 2004-06-08 Andrew Pinski <pinskia@physics.uc.edu>
+ * fold-const.c (fold_convert): Treat OFFSET_TYPE like
+ POINTER_TYPE and INTEGER_TYPE.
+
+ * config/rs6000/sysv4.h (ENDFILE_LINUX_SPEC): Use crtendS.o instead of
+ crtend.o if -pie. Use %{x:a;:b} spec syntax.
+
+2005-02-10 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/19579
+ * ifcvt.c (noce_try_cmove_arith): If emitting instructions to set up
+ both A and B, see if they don't clobber registers the other expr uses.
+
+2005-02-08 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/19803
+ * predict.c (PROB_VERY_UNLIKELY): Use 1% instead of 10%.
+
+2005-02-07 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config/m68k/t-rtems (MULTILIB_MATCHES): Let m528x match m5200.
+
+2005-02-03 Richard Guenther <rguenth@gcc.gnu.org>
+
+ PR middle-end/19775
+ * builtins.c (fold_builtin_sqrt): Transform
+ sqrt(pow(x,y)) to pow(fabs(x),y*0.5), not
+ pow(x,y*0.5).
+
+2005-02-01 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/16201
+ * arm.c (arm_eliminable_register): New function.
+ (adjacent_mem_locations): Don't allow eliminable registers. Use
+ HOST_WIDE_INT for address offsets.
+ * arm-protos.h (arm_eliminable_register): Add prototype.
+
+2005-01-31 Daniel Jacobowitz <dan@codesourcery.com>
+
+ 2004-09-22 Mark Mitchell <mark@codesourcery.com>
+ * gcc/dwarf2out.c (scope_die_for): If the containing scope is a
+ TRANSLATION_UNIT_DECL, consider it to be a global.
+
+2005-01-29 Alan Modra <amodra@bigpond.net.au>
+
+ * unwind-dw2.c (execute_stack_op): Add missing cases for
+ DW_OP_shl, DW_OP_shr, DW_OP_shra, DW_OP_xor.
+
+2005-01-28 Stephane Carrez <stcarrez@nerim.fr>
+
+ PR target/15384
+ * config/m68hc11/t-m68hc11-gas (dp-bit.c): Fix typo causing a
+ configuration part of dp-bit.c to be lost.
+
+2005-01-27 Ulrich Weigand <uweigand@de.ibm.com>
+
+ PR target/17771
+ Backport from mainline:
+ * config/s390/s390.md ("reload_outti"): Remove predicate for
+ output operand. Abort if operand is not a MEM.
+ ("reload_outdi", "reload_outdf"): Likewise.
+
+2005-01-27 Marek Michalkiewicz <marekm@amelek.gda.pl>
+
+ PR target/19293
+ PR target/19329
+ * config/avr/avr.c (notice_update_cc): Only set condition code for
+ ashrqi3 if shift count > 0.
+ (out_shift_with_cnt): Handle shift count <= 0 as a no-op.
+ (ashlqi3_out, ashlhi3_out, ashlsi3_out, ashrqi3_out, ashrhi3_out,
+ ashrsi3_out, lshrqi3_out, lshrhi3_out, lshrsi3_out): Handle shift
+ count <= 0 as a no-op, and shift count >= width by copying zero
+ or sign bit to all bits of the result.
+ * config/avr/avr.md (all shifts): Add alternatives for zero shift
+ count, with attribute "length" set to 0 and "cc" set to "none".
+
+2005-01-27 J"orn Rennecke <joern.rennecke@st.com>
+
+ * real.c (do_add): Initialize signalling and canonical members.
+
+ * real.c (real_from_integer): Zero out destination.
+
+2005-01-26 Ulrich Weigand <uweigand@de.ibm.com>
+
+ Backport from mainline:
+ * dbxout.c (dbxout_symbol_location): Resolve constant pool references
+ even for variables with NULL DECL_INITIAL.
+
+2005-01-25 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/19393
+ Backport:
+ 2004-03-30 Nick Clifton <nickc@redhat.com>
+ * config/arm/arm.md (thumb_jump): Reduce the backward branch
+ range, and increase the forward branch range, to allow for
+ the fact that the PC will be off by 4.
+
+2005-01-24 Richard Henderson <rth@redhat.com>
+ Aldy Hernandez <aldyh@redhat.com>
+
+ * regrename.c (note_sets): Handle subregs.
+
+2005-01-24 Jakub Jelinek <jakub@redhat.com>
+
+ * flow.c (propagate_one_insn): Formatting.
+
+ PR middle-end/19551
+ * flow.c (libcall_dead_p): Be more conservative if unsure.
+ If there are any instructions between insn and call, see if they are
+ all dead before saying the libcall is dead.
+
+2005-01-22 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ PR target/19548
+ * config/rs6000/rtems.h: Resurrect cpp_os_rtems_spec from gcc < 3.4.
+ (CPP_OS_RTEMS_SPEC): New (From gcc-3.3's config/rs6000/sysv4.h).
+ (SUBSUBTARGET_EXTRA_SPECS): Use CPP_OS_RTEMS_SPEC.
+
+2005-01-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/15139
+ * combine.c: Include params.h.
+ (count_rtxs): New function.
+ (record_value_for_reg): If replace_rtx would replace at least
+ 2 occurrences of REG in VALUE and TEM is really large, replace REG with
+ (clobber (const_int 0)) instead of TEM.
+ * params.def (PARAM_MAX_LAST_VALUE_RTL): New.
+ * params.h (MAX_LAST_VALUE_RTL): New.
+ * Makefile.in (combine.o): Depend on $(PARAMS_H).
+ * doc/invoke.texi (--param max-last-value-rtl=N): Document.
+
+ PR c/17297
+ * c-typeck.c (digest_init): Only call build_vector if all constructor
+ elements are *_CST nodes.
+
+ PR middle-end/19164
+ * c-typeck.c (digest_init): Only call build_vector if inside_init
+ is a CONSTRUCTOR.
+
+2005-01-18 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR debug/16261
+ Backport from mainline:
+ 2004-01-27 Devang Patel <dpatel@apple.com>
+
+ * dwarf2out.c: (remove_child_TAG): New function.
+ (gen_subprogram_die): Do not remove all children dies while reusing
+ declaration die for definition. Instead, selectively remove only
+ formal parameters.
+
+2005-01-18 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR rtl-optimization/19296
+ * combine.c (simplify_comparison): Rewrite the condition under
+ which a non-paradoxical SUBREG of a PLUS can be lifted when
+ compared against a constant.
+
+2005-01-17 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ * varasm.c (process_pending_assemble_output_defs): Fix previous change.
+
+2005-01-16 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR target/16304
+ * defaults.h (TARGET_DEFERRED_OUTPUT_DEFS): Provide default.
+ * toplev.c (compile_file): Call process_pending_assemble_output_defs
+ just before targetm.asm_out.file_end.
+ * tree.h (process_pending_assemble_output_defs): Declare.
+ * varasm.c (assemble_output_def, process_pending_assemble_output_defs):
+ New functions.
+ (assemble_alias): Defer generation of assembly code for defines when
+ TARGET_DEFERRED_OUTPUT_DEFS is true.
+ * config/rs6000/aix41.h (TARGET_DEFERRED_OUTPUT_DEFS): Define.
+ * config/rs6000/aix43.h (TARGET_DEFERRED_OUTPUT_DEFS): Define.
+ * doc/tm.texi (TARGET_DEFERRED_OUTPUT_DEFS): document.
+
+2005-01-15 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config/mips/rtems.h (MIPS_DEFAULT_GVALUE): Set to 0.
+ * config/mips/t-rtems (MULTILIBS_DIRNAMES,MULTILIB_OPTIONS):
+ Remove little endian multilib variants.
+ Add mips32 multilib variant.
+
+2005-01-14 David Edelsohn <edelsohn@gnu.org>
+
+ * config/rs6000/aix52.h (CPLUSPLUS_CPP_SPEC): Revert last change.
+
+2005-01-13 David O'Brien <obrien@FreeBSD.org>
+
+ Backport from mainline:
+ * config/freebsd-spec.h: Make KSE pthread lib logic the default.
+
+2005-01-13 David Edelsohn <edelsohn@gnu.org>
+
+ * config/rs6000/aix52.h (CPLUSPLUS_CPP_SPEC): Change _XOPEN_SOURCE
+ definition to 600.
+
+2005-01-13 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config/i386/t-rtems-i386: Multilib on -mtune instead of -mcpu.
+
+2005-01-12 David Mosberger <davidm@hpl.hp.com>
+ James E Wilson <wilson@specifixinc.com>
+
+ PR target/18987
+ * config/ia64/ia64.c (process_set): For alloc insn, only call
+ process_epilogue if !frame_pointer_needed.
+
+ PR target/13158
+ * config/ia64/ia64.c (ia64_expand_epilogue): Set RTX_FRAME_RELATED_P on
+ sibcall alloc instruction.
+ (process_set): Handle sibcall alloc instruction.
+
+2005-01-10 David Edelsohn <edelsohn@gnu.org>
+
+ PR target/18720
+ Backport from mainline
+ * collect2.c (main): Set aixrtl_flag for -brtl option.
+ (resolve_lib_name): Search for .so file extension before .a
+ if aixrtl_flag set.
+
+2005-01-08 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/19012
+ * config/i386/i386.md (addqi_1_slp): Set memory attribute.
+
+2005-01-07 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * configure.ac (HAVE_AS_OFFSETABLE_LO10): Fix typo.
+ * configure: Regenerate.
+
+2005-01-07 Jakub Jelinek <jakub@redhat.com>
+
+ * c-common.c (handle_mode_attribute): For ENUMERAL_TYPE, also copy
+ TYPE_MODE.
+
+2005-01-06 Richard Sandiford <rsandifo@redhat.com>
+
+ PR rtl-opt/13299
+ * loop.c (get_monotonic_increment, biased_biv_fits_mode_p,
+ biv_fits_mode_p, extension_within_bounds_p): New functions.
+ (check_ext_dependent_givs): Use them.
+
+2005-01-05 Richard Henderson <rth@redhat.com>
+
+ PR rtl-opt/10692
+ * reload1.c (do_input_reload): Restrict the optimization deleting
+ a previous output reload to RELOAD_FOR_INPUT.
+
+2005-01-06 Jakub Jelinek <jakub@redhat.com>
+
+ Backport from mainline:
+ 2004-03-22 Diego Novillo <dnovillo@redhat.com>
+
+ * c-typeck.c (same_translation_unit_p): Fix pasto.
+
+2005-01-02 Roger Sayle <roger@eyesopen.com>
+ Andrew Pinski <pinskia@physics.uc.edu>
+ James E. Wilson <wilson@specifixinc.com>
+
+ PR rtl-optimization/12092
+ * loop.c (emit_prefetch_instructions): Do nothing if PREFETCH_BLOCK
+ is zero.
+
+2004-12-30 Roger Sayle <roger@eyesopen.com>
+
+ PR middle-end/19175
+ * loop-unroll.c (expand_bct): Pass the code_label to the function
+ do_compare_rtx_and_jump, not the label ref. Clean-up style issues.
+
+2004-12-27 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ * vax.c (vax_address_cost, vax_rtx_cost): Correct casts.
+ (vax_rtx_cost): Handle small offsets for both PLUS and MINUS.
+
+2004-12-27 Steven Bosscher <stevenb@suse.de>
+ John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ rtl-optimization/12863
+ * config/vax/vax.h (CASE_DROPS_THROUGH): Don't define.
+ * config/vax/vax.md (casesi): Emit a test-and-branch to make sure
+ that the case is in range, to make sure the casesi insn is always
+ in range and never falls through.
+ (casesi1): Add comment to explain why casesi never falls through.
+ Remove the unnamed special case casesi pattern.
+
+2004-12-27 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR c++/14607.
+ Backported from main.
+ * configure.ac (HAVE_GAS_NSUBSPA_COMDAT): Add check for .NSUBSPA
+ COMDAT support.
+ * configure, config.in: Rebuilt.
+ * config/pa/pa-protos.h (som_text_section_asm_op,
+ som_readonly_data_section, som_one_only_readonly_data_section,
+ som_one_only_data_section, forget_section): Declare.
+ * pa.c (override_options): Set init_machine_status to
+ pa_init_machine_status.
+ (pa_init_machine_status): New function.
+ (pa_output_function_epilogue): Call forget_section if TARGET_SOM and
+ TARGET_GAS.
+ (pa_asm_output_mi_thunk): Likewise.
+ (som_text_section_asm_op): New function.
+ (pa_select_section): Call som_one_only_readonly_data_section and
+ som_one_only_data_section when appropriate.
+ * pa.h (struct machine_function): Define.
+ (EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS,
+ SOM_READONLY_DATA_SECTION_FUNCTION,
+ SOM_ONE_ONLY_READONLY_DATA_SECTION_FUNCTION,
+ SOM_ONE_ONLY_DATA_SECTION_FUNCTION, FORGET_SECTION_FUNCTION): New
+ macros.
+ * som.h (ASM_OUTPUT_FUNCTION_PREFIX): Delete.
+ (TEXT_SECTION_ASM_OP): Call som_text_section_asm_op.
+ (READONLY_DATA_ASM_OP, EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS): Delete.
+ (READONLY_DATA_SECTION): Call som_readonly_data_section when not PIC.
+ (SUPPORTS_SOM_COMDAT): New define.
+ (SUPPORTS_ONE_ONLY): True if SUPPORTS_WEAK or SUPPORTS_SOM_COMDAT.
+ (MAKE_DECL_ONE_ONLY): Rework common support.
+
+2004-12-26 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR target/17643
+ * pa.c (pa_function_ok_for_sibcall): Sibcalls are not ok when
+ generating code for the portable runtime.
+
+2004-12-25 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/19147
+ * config/rs6000/rs6000.md (andsi3_internal7, andsi3_internal8): Delete.
+
+2004-12-23 Richard Henderson <rth@redhat.com>
+
+ PR c/18282
+ * c-decl.c (finish_enum): Retain precision acquired from an attribute.
+
+2004-12-23 Alexandre Oliva <aoliva@redhat.com>
+
+ PR target/16819
+ * calls.c (load_register_parameters): Don't call use_regs when
+ nregs is zero.
+
+2004-12-22 Richard Henderson <rth@redhat.com>
+
+ PR target/19102
+ * config/i386/i386.c (x86_inter_unit_moves): Disable.
+ (ix86_hard_regno_mode_ok): Disallow SSE2 and MMX scalar modes
+ in SSE registers when only SSE1 enabled.
+
+2004-12-21 David O'Brien <obrien@FreeBSD.org>
+
+ Backport from mainline:
+ * config/freebsd-spec.h: Use KSE pthread lib for -pthread.
+
+2004-12-19 Richard Henderson <rth@redhat.com>
+
+ * config/i386/i386.c (ix86_hard_regno_mode_ok): Always accept all SSE,
+ MMX, 3DNOW modes in SSE registers; always accept all MMX, 3DNOW modes
+ in MMX registers.
+ * config/i386/i386.h (VALID_SSE2_REG_MODE): Don't include
+ VALID_MMX_REG_MODE.
+ * config/i386/i386.md (movv4sf_internal, movv4si_internal,
+ movv2di_internal, movv2si_internal, movv4hi_internal,
+ movv2sf_internal, movv2df_internal, movv8hi_internal,
+ movv16qi_internal, movti_internal): Add leading '*' to name.
+ (movv2di_internal, movv2df_internal, movv8hi_internal,
+ movv16qi_internal, movv2df, movv8hi, movv16qi, movv2di,
+ pushv2di, pushv8hi, pushv16qi): Enable for SSE1.
+ (movv2si_internal, movv4hi_internal): Add SSE alternatives.
+ (movv8qi_internal, movv2sf_internal): Likewise.
+ (movtf): Simplify conditional.
+ (movv2sf, pushv2sf): Enable for MMX.
+
+2004-12-19 Roger Sayle <roger@eyesopen.com>
+
+ PR middle-end/19068
+ * expr.c (expand_expr_real_1) <MAX_EXPR>: Ensure that target, op0
+ and op1 are all registers (or constants) before expanding the RTL
+ comparison sequence [to avoid reg_overlap_mentioned (target, op1)].
+
+2004-12-18 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR rtl-optimization/16968
+ * loop.c (scan_loop): Stop scanning the loop for movable
+ insns as soon as an optimization barrier is encountered.
+
+2004-12-16 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR other/18508
+ * config/alpha/t-osf4 (SHLIB_LINK): Use `.backup' as the suffix
+ to back up the existing shared library.
+ * config/arm/t-netbsd (SHLIB_LINK): Likewise.
+ * config/mips/t-iris5-6 (SHLIB_LINK): Likewise.
+ * config/pa/t-hpux-shlib (SHLIB_LINK): Likewise.
+ * config/sh/t-linux (SHLIB_LINK): Likewise.
+ * config/t-libunwind-elf (SHLIBUNWIND_LINK): Likewise.
+ * config/t-slibgcc-darwin (SHLIB_LINK): Likewise.
+ * config/t-slibgcc-elf-ver (SHLIB_LINK): Likewise.
+ * config/t-slibgcc-sld (SHLIB_LINK): Likewise.
+
+2004-12-16 Roger Sayle <roger@eyesopen.com>
+
+ PR middle-end/18493
+ * c-typeck.c (c_finish_case): Rechain statements if we didn't
+ encounter any case labels or a default.
+
+2004-12-16 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR middle-end/18882
+ * function.c (assign_stack_local_1): Use BITS_PER_UNIT alignment
+ when passed -2 as 'align'.
+ (put_var_into_stack): Use 'bool' as the type for the three local
+ predicates. Adjust calls to put_reg_into_stack.
+ When passed a CONCAT, instruct put_reg_into_stack to use
+ a consecutive stack slot for the second part.
+ (put_reg_into_stack): Remove 'promoted_mode' parameter, add
+ 'consecutive_p' parameter. Turn the three predicates into 'bool'
+ parameters. Retrieve the register mode from 'reg'.
+ When consecutive_p is true, instruct assign_stack_local_1 to use
+ BITS_PER_UNIT alignment.
+ (put_addressof_into_stack): Use 'bool' as the type for the two
+ local predicates. Adjust call to put_reg_into_stack.
+
+2004-12-16 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR middle-end/18590
+ * function.c (fixup_var_refs_insns_with_hash): Do not invoke
+ fixup_var_refs_insn on insns marked as deleted.
+
+2004-12-15 Richard Henderson <rth@redhat.com>
+
+ PR target/19028
+ * config/i386/i386.md (sse compare splitter): Test for SF and DFmode
+ explicitly instead of using VALID_SSE_REG_MODE.
+
+2004-12-15 Richard Henderson <rth@redhat.com>
+
+ PR target/19005
+ * config/i386/i386.md (swaphi_1): Swap with swaphi_2, allow with
+ optimize_size.
+ (swapqi_1): Rename from swapqi. Enable only for no partial reg
+ stall and optimize_size.
+ (swapqi_2): New.
+ (swaphi_1, swaphi_2, swapqi_1): Add athlon_decode.
+ (swapsi, swaphi_1, swaphi_2, swapqi_1, swapdi): Remove modrm override.
+
+2004-12-15 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/18153
+ * configure.ac: Define HAVE_LD_STATIC_DYNAMIC if linker supports
+ -Bstatic/-Bdynamic option.
+ * config.in: Regenerated.
+ * configure: Likewise.
+
+ * gcc.c (init_spec): Pass -Bstatic/-Bdynamic to ld for static
+ -lunwind if possible.
+
+2004-12-15 Richard Henderson <rth@redhat.com>
+
+ PR target/19010
+ * config/i386/i386.c (gen_reg_or_parallel): New.
+ (function_arg): Use it.
+ (ix86_hard_regno_mode_ok): Test SSE1 and SSE2 separately,
+ MMX and 3DNOW separately.
+ (ix86_rtx_costs): Simplify FLOAT_EXTEND case.
+ * config/i386/i386.h (VALID_SSE2_REG_MODE): Move SSE2 cases from ...
+ (VALID_SSE_REG_MODE): ... here.
+ * config/i386/i386.md (movv4sf_internal): Validate one MEM.
+ (movv4si_internal): Likewise.
+ (movv2di_internal): Likewise. Enable for SSE2 only.
+ (movv2di): Enable for SSE2 only.
+ (pushv4si): Enable for SSE1.
+
+2004-12-15 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR c++/17972
+ * tree-inline.c (expand_call_inline): Set TREE_SIDE_EFFECTS
+ on the STMT_EXPR wrapping up the inlined body.
+
+2004-12-15 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR preprocessor/15167
+ * cppfiles.c (destroy_cpp_file): New function.
+ (should_stack_file): Make a new file if the
+ compared file is still stacked.
+
+2004-12-15 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR other/18665
+ * libgcc-std.ver (GCC_3.4.4): Inherit from GCC_3.4.2.
+ Export __absvti2, __addvti3, __mulvti3, __negvti2 and __subvti3.
+ * libgcc-darwin.ver (GCC_3.4.4): Inherit from GCC_3.4.
+ Export __absvti2, __addvti3, __mulvti3, __negvti2 and __subvti3.
+ * libgcc2.c (__addvsi3): Rename to __addvSI3.
+ New version if COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+ (__addvdi3): Rename to __addvDI3.
+ (__subvsi3): Rename to __subvSI3. Use word type for the result.
+ New version if COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+ (__subvdi3): Rename to __subvDI3.
+ (__mulvsi3): Rename to __mulvSI3.
+ New version if COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+ (__mulvdi3): Rename to __mulvDI3.
+ (__negvsi2): Rename to __negvSI2.
+ New version if COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+ (__negvdi2): Rename to __negvDI2.
+ (__absvsi2): Rename to __absvSI2.
+ New version if COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+ (__absvdi2): Rename to __absvDI2.
+ * libgcc2.h (64-bit targets): Define COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+ (__absvSI2, __addvSI3, __subvSI3, __mulvSI3, __negvSI2, __absvDI2,
+ __addvDI3, __subvDI3, __mulvDI3, __negvDI2): Define to the appropriate
+ symbol and declare.
+ (__absvsi2, __addvsi3, __subvsi3, __mulvsi3, __negvsi2): Declare if
+ COMPAT_SIMODE_TRAPPING_ARITHMETIC.
+
+2004-12-14 Steve Ellcey <sje@cup.hp.com>
+
+ * doc/invoke.texi (IA-64 options): Add existing options that
+ weren't already listed.
+
+2004-12-14 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/18951
+ * builtins.c (expand_builtin_mathfn, expand_builtin_mathfn_2): Avoid
+ using arguments passed to save_expr after that call.
+
+2004-12-13 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR middle-end/18730
+ * emit-rtl.c (get_first_nonnote_insn, get_last_nonnote_insn): When
+ the first/last insn is a sequence, return the first/last insn of the
+ sequence.
+
+2004-12-13 Roger Sayle <roger@eyesopen.com>
+
+ PR target/18002
+ PR middle-end/18424
+ Backport from mainline
+
+ 2004-03-20 Richard Sandiford <rsandifo@redhat.com>
+ * Makefile.in (dojump.o): Depend on $(GGC_H) and dojump.h.
+ (GTFILES): Add $(srcdir)/dojump.h.
+ (gt-dojump.h): New dependency.
+ * dojump.c (and_reg, and_test, shift_test): New static variables.
+ (prefer_and_bit_test): New function.
+ (do_jump): Use it to choose between (X & (1 << C)) and (X >> C) & 1.
+
+ 2004-03-21 Andrew Pinski <pinskia@gcc.gnu.org>
+ * dojump.c (prefer_and_bit_test): Fix which part of
+ the and_test is replaced.
+
+ 2004-12-10 Roger Sayle <roger@eyesopen.com>
+ * dojump.c (do_jump): When attempting to reverse the effects of
+ fold_single_bit_test, we need to STRIP_NOPS and narrowing type
+ conversions, and handle BIT_XOR_EXPR that's used to invert the
+ sense of the single bit test.
+
+2004-12-13 Richard Henderson <rth@redhat.com>
+
+ PR target/17990
+ * config/i386/i386.md (negsf2): Fix condition for using sse.
+ (negdf2, abssf2, absdf2): Likewise.
+ (negsf2_if, abssf2_if): Don't disable if sse enabled.
+ (movv4sf_internal splitter): Postpone til after reload.
+ (movv2di_internal splitter): Likewise.
+
+2004-12-13 Richard Henderson <rth@redhat.com>
+
+ PR middle-end/17930
+ * toplev.c (rest_of_compilation): Fix computation of
+ preferred_incoming_stack_boundary.
+
+2004-12-12 Richard Henderson <rth@redhat.com>
+
+ PR rtl-opt/17186
+ * reg-stack.c (move_for_stack_reg): Handle source register not
+ live with a nan.
+
+2004-12-12 Richard Henderson <rth@redhat.com>
+
+ PR target/18932
+ * config/i386/i386.md (all splits and peepholes): Use flags_reg_operand
+ and compare_operator to propagate the input CC mode to the output.
+ * config/i386/i386.c (compare_operator): New.
+ * config/i386/i386.h (PREDICATE_CODES): Add it.
+ * config/i386/i386-protos.h: Update.
+
+2004-12-09 Richard Henderson <rth@redhat.com>
+
+ PR c/18282
+ * attribs.c (decl_attributes): Clear DECL_ALIGN when relaying out decl.
+ * c-common.c (handle_mode_attribute): Handle enumeral types.
+
+2004-12-09 Richard Henderson <rth@redhat.com>
+
+ PR target/17025
+ * config/i386/i386.md (testqi_1_maybe_si, andqi_2_maybe_si): New.
+ (test_qi_1, andqi_2): Do not promote to simode.
+
+2004-12-07 David Mosberger <davidm@hpl.hp.com>
+
+ PR target/18443
+ * config/ia64/ia64.c (ia64_assemble_integer): Add support for
+ emitting unaligned pointer-sized integers.
+
+2004-12-07 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR middle-end/17827
+ * c-semantics.c (expand_unreachable_if_stmt): Invoke
+ expand_cond on the condition.
+
+2004-12-06 Aldy Hernandez <aldyh@redhat.com>
+
+ * config/rs6000/sysv4.h: Define RELOCATABLE_NEEDS_FIXUP to 1.
+
+2004-12-05 Richard Henderson <rth@redhat.com>
+
+ PR target/18841
+ * config/alpha/alpha.md (UNSPECV_SETJMPR_ER): New.
+ (builtin_setjmp_receiver_er_sl_1): Use it.
+ (builtin_setjmp_receiver_er_1): Likewise.
+ (builtin_setjmp_receiver_er, exception_receiver_er): Remove.
+ (builtin_setjmp_receiver): Don't split for explicit relocs until
+ after reload.
+ (exception_receiver): Likewise.
+
+2004-12-05 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/rs6000.c (rs6000_assemble_integer): Fix typo.
+
+2004-12-04 Richard Henderson <rth@redhat.com>
+
+ * emit-rtl.c, expr.c, function.c, integrate.c, optabs.c, rtl.h:
+ Revert the patches for PR rtl-opt/15289.
+
+2004-12-03 Eric Botcazou <ebotcazou@adacore.com>
+
+ * integrate.c (expand_inline_function): Accept non-CONCAT arguments
+ for CONCAT parameters and invoke read_complex_part on them.
+
+2004-12-02 Richard Henderson <rth@redhat.com>
+
+ * expr.c (write_complex_part): Use simplify_gen_subreg when the
+ submode is at least as large as a word.
+ (read_complex_part): Likewise.
+
+2004-12-02 Roger Sayle <roger@eyesopen.com>
+
+ PR target/9908
+ * config/i386/i386.md (*call_value_1, *sibcall_value_1): Correct
+ Intel assembler syntax by using %A1 instead of %*%1.
+
+2004-12-02 Richard Henderson <rth@redhat.com>
+
+ PR rtl-opt/15289
+ * emit-rtl.c (gen_complex_constant_part): Remove.
+ (gen_realpart, gen_imagpart, subreg_realpart_p): Remove.
+ * expr.c (write_complex_part, read_complex_part): New.
+ (emit_move_via_alt_mode, emit_move_via_integer, emit_move_resolve_push,
+ emit_move_complex_push, emit_move_complex, emit_move_ccmode,
+ emit_move_multi_word): Split out from ...
+ (emit_move_insn_1): ... here.
+ (expand_expr_real) <COMPLEX_EXPR>: Use write_complex_part.
+ <REALPART_EXPR, IMAGPART_EXPR>: Use read_complex_part.
+ <CONJ_EXPR>: Likewise.
+ * function.c (assign_parms): Hard-code transformations
+ instead of using gen_realpart/gen_imagpart.
+ * integrate.c (initialize_for_inline): Likewise.
+ * optabs.c (expand_unop): Use read_complex_part/write_complex_part.
+ (expand_complex_abs): Likewise.
+ (expand_binop): Likewise. Rearrange to build a CONCAT at the end,
+ rather than creating a complex target at the beginning.
+ * rtl.h (gen_realpart, gen_imagpart, subreg_realpart_p): Remove.
+ (read_complex_part, write_complex_part): Declare.
+
+2004-12-02 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/rs6000.c (rs6000_assemble_integer): Put back the
+ #ifdef RELOCATABLE_NEEDS_FIXUP.
+
+2004-12-01 Nathanael Nerode <neroden@gcc.gnu.org>
+
+ PR preprocessor/17651
+ * c-opts.c (sanitize_cpp_opts): Make flag_no_output imply
+ flag_no_line_commands.
+ * c-ppoutput.c (pp_file_change): Remove now-redundant check of
+ flag_no_output.
+
+ PR preprocessor/17610
+ * directives.c (do_include_common): Error out if an empty filename
+ is given for #include (or #include_next or #import).
+ PR preprocessor/17610
+ * testsuite/gcc.dg/cpp/empty-include.c: New testcase.
+
+2004-12-02 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/16952
+ * config/rs6000/rs6000.c (rs6000_assemble_integer): Replace
+ #ifdef RELOCATABLE_NEEDS_FIXUP with if.
+ * config/rs6000/linux.h (RELOCATABLE_NEEDS_FIXUP): Define in terms
+ of target_flags_explicit.
+ * config/rs6000/linux64.h (RELOCATABLE_NEEDS_FIXUP): Ditto for biarch
+ case. Define as 0 for non-biarch.
+
+2004-12-01 Richard Henderson <rth@redhat.com>
+
+ * expr.c (optimize_bitfield_assignment_op): Split out from ...
+ (expand_assignment): ... here. Use handled_component_p to gate
+ get_inner_reference code. Simplify MEM handling. Special case
+ CONCAT destinations.
+ (get_inner_reference): Handle REAL/IMAGPART_EXPR.
+ (handled_component_p): Likewise.
+
+2004-12-01 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/12817
+ * config/rs6000/rs6000.c (rs6000_emit_prologue): Use r0 for vrsave.
+
+2004-11-30 Jakub Jelinek <jakub@redhat.com>
+
+ * fold-const.c (extract_muldiv_1) <case ABS_EXPR>: If ctype is
+ unsigned and type signed, build ABS_EXPR with signed_type (ctype)
+ and only afterwards convert to ctype.
+
+2004-11-29 Richard Henderson <rth@redhat.com>
+
+ PR target/17224
+ * config/ia64/ia64.c (sdata_symbolic_operand): Deny offsets
+ outside the referenced object.
+
+2004-11-28 Andreas Fischer <a_fisch@gmx.de>
+ Alan Modra <amodra@bigpond.net.au>
+
+ PR target/16343
+ * config/rs6000/rs6000.c (rs6000_elf_in_small_data_p): Disallow
+ functions, strings and thread-local vars.
+
+2004-11-27 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/12769
+ * config/rs6000/rs6000.c (init_cumulative_args): Set call_cookie
+ from rs6000_default_long_calls for libcalls.
+
+ PR target/18686
+ * config/rs6000/rs6000-c.c (rs6000_pragma_longcall): Use
+ integer_zerop and integer_onep instead of comparing against
+ canonical trees.
+
+2004-11-25 Richard Henderson <rth@redhat.com>
+
+ PR c++/6764
+ * reload1.c (set_initial_eh_label_offset): New.
+ (set_initial_label_offsets): Use it.
+
+2004-11-26 Alan Modra <amodra@bigpond.net.au>
+
+ PR rtl-optimization/16356
+ * config/rs6000/rs6000.md (floatdisf2_internal2): Rewrite with
+ separate output register and one less jump. Enable for powerpc64.
+ (floatdisf2): Adjust for above.
+
+2004-11-25 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config.gcc (avr-*-rtems*): Fix typo.
+
+2004-11-24 Uros Bizjak <uros@kss-loka.si>
+
+ PR rtl-optimization/18614
+ * simplify-rtx.c (simplify_binary_operation): Do not
+ simplify inner elements of constant arguments of
+ VEC_CONCAT insn.
+
+2004-11-23 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ Backport from mainline:
+ 2004-10-18 Eric Botcazou <ebotcazou@libertysurf.fr>
+ Roger Sayle <roger@eyesopen.com>
+
+ PR middle-end/17813
+ * dojump.c (discard_pending_stack_adjust): New function.
+ (clear_pending_stack_adjust): Call it.
+ * expr.h (discard_pending_stack_adjust): Declare it.
+ * explow.c (emit_stack_save): Emit pending stack adjustments
+ before saving the stack pointer.
+ (emit_stack_restore): Discard pending stack adjustments before
+ restoring the stack pointer.
+
+2004-11-23 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config/c4x/t-rtems: New.
+ * config.gcc: Reflect having added c4x/t-rtems.
+
+2004-11-23 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config/arm/t-rtems: New.
+ * config.gcc: Reflect having added arm/t-rtems.
+
+2004-11-23 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config.gcc: Add avr-*-rtems*.
+ * config/avr/t-rtems: New.
+ * config/avr/rtems.h: New.
+
+2004-11-22 John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ PR rtl-optimization/14838
+ * emit-rtl.c (get_first_nonnote_insn): Don't assume first insn is a
+ note.
+ (get_last_nonnote_insn): Don't assume last insn is a note.
+
+2004-11-21 Roger Sayle <roger@eyesopen.com>
+
+ * fixinc/inclhack.def (alpha_pthread_init): Fix technical problems
+ with the last check-in caused by CVS variable substitution.
+ * fixinc/fixincl.x: Likewise.
+ * fixinc/tests/base/pthread.h: Likewise.
+
+2004-11-21 Roger Sayle <roger@eyesopen.com>
+ Bruce Korb <bkorb@gnu.org>
+
+ Synchronize with mainline
+ * fixinc/inclhack.def (alpha_pthread_init): New fix.
+ * fixinc/fixincl.x: Regenerate.
+ * fixinc/tests/base/pthread.h: Update for new test.
+
+2004-11-17 Ramana Radhakrishnan <ramana.radhakrishnan@codito.com>
+
+ PR target/18263
+ * config/arc/lib1funcs.asm (___umulsidi3): Change use of cmp to the
+ equivalent on the A4.
+
+2004-11-16 Joseph S. Myers <joseph@codesourcery.com>
+
+ PR c/18498
+ * c-decl.c (grokdeclarator): Call check_bitfield_type_and_width
+ after processing the declarator.
+
+2004-11-14 Andrew Pinski <pinskia@physics.uc.edu>
+
+ PR objc/18406
+ * objc/objc-act.c (encode_type): 96-bit doubles are encoded the
+ same way as 64-bit and 128-bit doubles are.
+
+2004-11-14 Hans-Peter Nilsson <hp@bitrange.com>
+
+ PR target/18347
+ * config/mmix/mmix.c (mmix_function_outgoing_value): Handle
+ TImode. Sorry for other non-complex larger-than-64-bit modes.
+ * config/mmix/mmix.h (MIN_UNITS_PER_WORD): Do not define.
+ (INIT_CUMULATIVE_ARGS): Correct unused macro name FNDECL.
+
+2004-11-13 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * doc/md.texi (constraints) <% modifier>: Mention that it is
+ useless when the two alternatives are strictly identical.
+
+2004-11-12 Richard Henderson <rth@redhat.com>
+
+ PR 17778
+ * config/i386/i386.h (TARGET_96_ROUND_53_LONG_DOUBLE): New.
+ * config/i386/freebsd.h (SUBTARGET_OVERRIDE_OPTIONS): Remove.
+ (TARGET_96_ROUND_53_LONG_DOUBLE): New.
+ * config/i386/i386-modes.def (XF): Use it.
+
+2004-11-12 Ralf Corsepius <ralf.corsepius@rtems.org>
+
+ * config/rs6000/t-rtems (MULTILIB_NEW_EXCEPTIONS_ONLY):
+ Remove m505/roe multilib variant.
+
+2004-11-12 Eric Botcazou <ebotcazou@act-europe.fr>
+
+ Backport from mainline:
+ 2004-02-25 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/alpha.c (alpha_emit_conditional_branch): Don't
+ use (op0-op1) == 0 if op0 is a pointer.
+
+2004-11-10 Joseph S. Myers <joseph@codesourcery.com>
+
+ PR c/18322
+ * c-common.c (fname_decl): Don't use line number of decl in
+ diagnostic.
+
+2004-11-10 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * config/sparc/sparc.c (function_arg_union_value): New 'slotno'
+ argument. Return naked register for unions with zero length.
+ When the union is passed in the 6th slot, build a PARALLEL with
+ only one element.
+ (function_arg): Adjust call to function_arg_union_value.
+ (function_value): Likewise.
+
+2004-11-09 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/18380
+ * config/ia64/unwind-ia64.h (_Unwind_FindTableEntry): Mark it
+ hidden.
+
+ * unwind-dw2.c (_Unwind_FindTableEntry): Removed.
+
+2004-11-10 Alan Modra <amodra@bigpond.net.au>
+
+ PR target/16480
+ 2004-08-26 Alan Modra <amodra@bigpond.net.au>
+ * config/rs6000/rs6000.c (rs6000_split_multireg_move): Don't abort
+ on "(mem (symbol_ref ..))" rtl. Look at LO_SUM base regs as well
+ as PLUS base regs.
+ 2004-08-01 Geoffrey Keating <geoffk@apple.com>
+ * config/rs6000/rs6000.c (rs6000_split_multireg_move): Just abort
+ if trying to *store* to a non-offsettable address.
+ 2004-07-30 Geoffrey Keating <geoffk@apple.com>
+ * config/rs6000/rs6000.c (rs6000_split_multireg_move): Cope with
+ non-offsettable addresses being moved into multiple GPRs.
+
+2004-11-07 Richard Sandiford <rsandifo@redhat.com>
+
+ * config/mips/t-iris6 (tp-bit.c): Fix target filename.
+
+2004-11-07 Mark Mitchell <mark@codesourcery.com>
+
+ * version.c (version_string): Set to 3.4.4.
+ * doc/include/gcc-common.texi (version): Likewise.
+
+2004-11-04 Release Manager
+
+ * GCC 3.4.3 released.
+
+2004-10-31 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/18129
+ * varasm.c (copy_constant): Don't copy STRING_CSTs if
+ flag_writable_strings.
+ (build_constant_desc): Call copy_constant unconditionally.
+
+2004-10-30 Roger Sayle <roger@eyesopen.com>
+
+ PR rtl-optimization/17581
+ * cselib.c (cselib_process_insn): The last instruction of a libcall
+ block, with the REG_RETVAL note, should be considered in the libcall.
+ * gcse.c (do_local_cprop): Allow constants to be propagated outside
+ of libcall blocks.
+ (adjust_libcall_notes): Use simplify_replace_rtx instead of
+ replace_rtx to avoid creating invalid RTL in REG_RETVAL notes.
+
+2004-10-27 Andrew Pinski <pinskia@physics.uc.edu>
+
+ PR other/18186
+ * common.opt (--param): Fix spelling of parameter.
+
+2004-10-27 Andreas Krebbel <krebbel1@de.ibm.com>
+
+ * config/s390/s390.md ("*subdf3_cc"): Replaced plus by minus.
+
+2004-10-26 Richard Sandiford <rsandifo@redhat.com>
+
+ PR bootstrap/15747
+ * doc/install.texi (mips-sgi-irix5): Document that /bin/sh has been
+ reported to hang during bootstrap and that CONFIG_SHELL=/bin/ksh
+ can be used to work around this.
+
+2004-10-26 Roger Sayle <roger@eyesopen.com>
+ John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
+
+ Backport 3.4 regression fix from mainline
+ * cse.c: Change encoding of quantity numbers to avoid undefined
+ pointer arithmetic on qty_table.
+ (REGNO_QTY_VALID_P): A quantity is now valid if it isn't negative.
+ (get_cse_reg_info): Initialize reg_qty to a unique negative value.
+ (new_basic_block): Assign "real" quantity numbers from zero.
+ (delete_reg_equiv): Do nothing if quantity is invalid. Reset the
+ REG_QTY to its unique negative value.
+ (merge_equiv_classes): Calculate need_rehash if quantity is valid.
+ (cse_main): Don't include max_reg when determining max_qty.
+ (cse_basic_block): Avoid subtracting a large offset from qty_table,
+ which causes undefined C99 behaviour. Only allocate needed memory.
+
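+	Illustrative sketch (not from the patch) of the idiom being removed:
+	biasing a base pointer so that register numbers index it directly
+	relies on out-of-bounds pointer arithmetic, which C99 leaves
+	undefined.
+
+	  int qty_data[NQTY];
+	  int *qty_table = qty_data - max_reg;  /* undefined: the biased  */
+	  ... qty_table[regno] ...              /* pointer lies outside   */
+	                                        /* the qty_data array     */
+
+	The new encoding keeps real quantity numbers at zero and up and uses
+	unique negative values as the invalid marks, so the validity test is
+	roughly:
+
+	  #define REGNO_QTY_VALID_P(N) (REG_QTY (N) >= 0)
+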
+2004-10-25 Jakub Jelinek <jakub@redhat.com>
+
+ * dwarf2out.c (rtl_for_decl_location): Avoid segfault if
+ DECL_INCOMING_RTL is NULL.
+
+2004-10-21 Eric Christopher <echristo@redhat.com>
+
+ * config/rs6000/rs6000.c (setup_incoming_varargs): Align DFmode
+ saves.
+
+2004-10-22 Bernardo Innocenti <bernie@develer.com>
+
+ Backport from the mainline:
+ 2004-10-22 Peter Barada <peter@the-baradas.com>
+ * config/m68k/m68k.h (HARD_REGNO_RENAME_OK): New macro.
+ * config/m68k/m68k.c (m68k_hard_regno_rename_ok): Disallow
+ renaming of non-live registers in interrupt functions.
+ * config/m68k/m68k-protos.h (m68k_hard_regno_rename_ok): Add prototype.
+
+2004-10-21 Giovanni Bajo <giovannibajo@gcc.gnu.org>
+
+ * config/arc/lib1funcs.asm (___umulsidi3): Fix typo.
+
+2004-10-21 Aldy Hernandez <aldyh@redhat.com>
+
+ PR 18004.
+ * expmed.c (store_bit_field): Pass original 'value' before
+ recursing.
+
+2004-10-21 Ramana Radhakrishnan <ramana.radhakrishnan@codito.com>
+
+ * config/arc/lib1funcs.asm (___umulsidi3): Correct usage of flags.
+
+ PR target/17317
+ * config/arc/arc.h (REGNO_OK_FOR_BASE_P,REGNO_OK_FOR_INDEX_P,
+ REG_OK_FOR_BASE, REG_OK_FOR_INDEX): Consider blink(r31) as a valid
+ base and index register for loads.
+
+ * config/arc/t-arc: Fix multilib handling.
+
+2004-10-18 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR bootstrap/17684
+ * Makefile.in (clean): Remove libgcc_s$(SHLIB_EXT).1.stage?.
+ (stage1-start): Remove and copy libunwind.a and
+ libunwind*$(SHLIB_EXT) instead of libunwind*.
+ (stage2-start): Likewise.
+ (stage3-start): Likewise.
+ (stage4-start): Likewise.
+ (stageprofile-start): Likewise.
+ (stagefeedback-start): Likewise.
+
+ * config/alpha/t-osf4 (SHLIB_LINK): Use a temporary file for
+ the shared library to be created and don't remove the existing
+ shared library.
+ * config/arm/t-netbsd (SHLIB_LINK): Likewise.
+ * config/mips/t-iris5-6 (SHLIB_LINK): Likewise.
+ * config/pa/t-hpux-shlib (SHLIB_LINK): Likewise.
+ * config/sh/t-linux (SHLIB_LINK): Likewise.
+ * config/t-libunwind-elf (SHLIBUNWIND_LINK): Likewise.
+ * config/t-slibgcc-darwin (SHLIB_LINK): Likewise.
+ * config/t-slibgcc-elf-ver (SHLIB_LINK): Likewise.
+ * config/t-slibgcc-sld (SHLIB_LINK): Likewise.
+
+ * mklibgcc.in (libgcc-stage-start): Also move "*${objext}s"
+ files.
+
+2004-10-18 Jakub Jelinek <jakub@redhat.com>
+
+ * config/i386/i386.md (addqi_1_slp): Test for incdec_operand
+ operand 1 instead of 2.
+
+2004-10-17 David O'Brien <obrien@FreeBSD.org>
+
+ * config/rs6000/sysv4.h (LINK_OS_FREEBSD_SPEC): Sync '-p' handling with
+ other FreeBSD platforms.
+
+2004-10-14 Richard Henderson <rth@redhat.com>
+
+ PR debug/14492
+ * dwarf2out.c (loc_descriptor_from_tree): Handle FIX_*_EXPR.
+
+2004-10-14 Richard Henderson <rth@redhat.com>
+
+ PR c/17023
+ * c-parse.in (compstmt_primary_start): Check last_tree non-null,
+ not current_function_decl non-null.
+
+2004-10-14 Roger Sayle <roger@eyesopen.com>
+
+ PR other/17361
+ * c-opts.c (c_common_init_options): Scan command line options for
+ "-lang-asm" and if found allow any of the C-family front-end options.
+ Likewise, scan all command line options for g77's -traditional-cpp.
+
+2004-10-14 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * doc/install.texi (*-*-solaris2*): Update with info about kernel
+ patches to solve spurious testsuite failures.
+
+2004-10-13 Andrew Pinski <pinskia@physics.uc.edu>
+
+ * dwarf2out.c (rtl_for_decl_location): Do not use MEM_P but use
+ GET_CODE == MEM.
+
+2004-10-13 Richard Henderson <rth@redhat.com>
+
+ PR debug/15860
+ * dwarf2out.c (rtl_for_decl_location): Apply big-endian correction
+ for DECL_INCOMING_RTL.
+
+2004-10-13 Richard Henderson <rth@redhat.com>
+
+ PR c/17384
+ * c-common.c (handle_mode_attribute): Disallow mode changes that
+ alter the CODE of the top-level type.
+
+ * crtstuff.c (__FRAME_END__): Remove mode attribute. Find 32-bit
+ integer from internal limits macros.
+ * config/i386/emmintrin.h (__v2df): Fix base type.
+ * config/i386/xmmintrin.h (__m128, __v4sf): Likewise.
+
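+	The rule in a minimal (hypothetical) example: the mode attribute may
+	change a type's width, but the requested mode must match the kind of
+	the base type.
+
+	  typedef int    di  __attribute__ ((mode (DI)));    /* OK           */
+	  typedef float  bad __attribute__ ((mode (V2DF)));  /* now rejected:
+	                            V2DF elements are double, not float      */
+	  typedef double ok  __attribute__ ((mode (V2DF)));  /* OK           */
+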
+2004-10-13 Richard Henderson <rth@redhat.com>
+
+ PR debug/13841
+ * function.c (instantiate_decl): Recurse for CONCAT.
+
+2004-10-13 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR target/14454
+ * config/sparc/sparc.c (TARGET_ASM_CAN_OUTPUT_MI_THUNK): Set to
+ sparc_can_output_mi_thunk.
+ (sparc_output_mi_thunk): Simplify handling of delta offset. Add
+ handling of vcall offset.
+ (sparc_can_output_mi_thunk): New predicate.
+ * doc/tm.texi (TARGET_ASM_OUTPUT_MI_THUNK): Document VCALL_OFFSET.
+ (TARGET_ASM_OUTPUT_MI_VCALL_THUNK): Delete.
+ (TARGET_ASM_CAN_OUTPUT_MI_THUNK): New target hook.
+
+ * config/sparc/sparc.md (movdi): Remove redundant test.
+
+2004-10-12 Richard Henderson <rth@redhat.com>
+
+ PR rtl-opt/17503
+ * regclass.c (subregs_of_mode): Turn into an htab. Make static.
+ (som_hash, som_eq): New.
+ (init_subregs_of_mode, record_subregs_of_mode): New.
+ (cannot_change_mode_set_regs): Rewrite for htab implementation.
+ (invalid_mode_change_p): Likewise.
+ * combine.c (gen_lowpart_for_combine): Use record_subregs_of_mode.
+ * flow.c (mark_used_regs): Likewise.
+ (life_analysis): Use init_subregs_of_mode.
+ * regs.h (subregs_of_mode): Remove.
+ * rtl.h (init_subregs_of_mode, record_subregs_of_mode): Declare.
+
+2004-10-10 Roger Sayle <roger@eyesopen.com>
+
+ PR rtl-optimization/17853
+ * combine.c (combine_simplify_rtx): Don't attempt any simplifications
+ of vector mode comparison operators.
+ * cse.c (fold_rtx): Likewise.
+ * simplify-rtx.c (simplify_gen_relational): Avoid calling
+ simplify_relational_operation with vector mode comparison operators.
+ (simplify_rtx): Likewise.
+
+2004-10-09 Joseph S. Myers <joseph@codesourcery.com>
+
+ * config/rs6000/spe.h (atosfix16, atosfix32, atosfix64, atoufix16,
+ atoufix32, atoufix64, strtosfix16, strtosfix32, strtosfix64,
+ strtoufix16, strtoufix32, strtoufix64): Declare.
+
+2004-10-08 Andrew Pinski <pinskia@physics.uc.edu>
+
+ PR c/16999
+ * c-ppoutput.c (cb_ident): Don't quote string as it is already
+ quoted.
+
+2004-10-08 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR target/17245
+ * config/sparc/sparc.c (input_operand): Remove redundant code
+ for handling LO_SUM.
+ (legitimate_address_p) <REG+REG>: Do not recheck TARGET_V9.
+ <LO_SUM>: If LO_SUM is offsettable, accept it for TFmode on V9.
+ Otherwise only accept it for TFmode if quad move insns are available.
+
+2004-10-07 Giovanni Bajo <giovannibajo@gcc.gnu.org>
+
+ PR c++/17115
+ * tree-inline.c (expand_call_inline): Do not warn for functions
+ marked with attribute noinline.
+
+2004-10-07 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * doc/install.texi (*-*-solaris2*): Fix marker for URL.
+
+2004-10-07 Richard Sandiford <rsandifo@redhat.com>
+
+ PR target/17770
+ * config/mips/mips.md (mov_lwl): Remove hazard=none attribute.
+
+2004-10-07 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR rtl-optimization/17027
+ Backport from mainline:
+ * cfglayout.c (fixup_fallthru_exit_predecessor): If the first block
+ falls through to exit, split it.
+ * cfgrtl.c (rtl_split_block): If no insn is specified, split on the
+ first insn in the basic block.
+
+2004-10-06 H.J. Lu <hongjiu.lu@intel.com>
+
+ * doc/invoke.texi (freorder-functions): Add a leading `.' to
+ "text.hot" and "text.unlikely" sections.
+
+2004-10-06 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ PR target/16007
+ * doc/install.texi (*-*-solaris2*): Mention potential problem
+ with Sun assembler + GNU linker and C++ programs.
+ Document status of binutils 2.15 release.
+
+2004-10-06 Alan Modra <amodra@bigpond.net.au>
+
+ PR 16406
+ * doc/tm.texi (LIBGCC_SPEC): Mention modifications.
+ (USE_LD_AS_NEEDED, LINK_EH_SPEC): Document.
+
+2004-10-03 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/linux.h (TARGET_C99_FUNCTIONS): Define.
+ (OS_MISSING_POWERPC64): Move, and comment.
+ * config/rs6000/linux64.h (TARGET_C99_FUNCTIONS): Define.
+ (OS_MISSING_POWERPC64): Move, and comment.
+
+2004-10-01 Nick Clifton <nickc@redhat.com>
+
+ * config/stormy16/stormy16.c (TARGET_BUILD_BUILTIN_VA_LIST_TYPE):
+ Fix typo in macro name; it should be TARGET_BUILD_BUILTIN_VA_LIST.
+
+2004-09-30 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/qrnnd.asm: Mark for noexecstack.
+
+2004-09-30 Richard Henderson <rth@redhat.com>
+
+ * unwind-dw2.c (_Unwind_GetGR): Honor DWARF_ZERO_REG.
+ * doc/tm.texi (DWARF_ZERO_REG): New.
+
+ * config/alpha/alpha.c (alpha_sa_mask, alpha_expand_prologue,
+ alpha_expand_epilogue): Revert 2003-09-30 change to store zero.
+ * config/alpha/alpha.h (DWARF_ZERO_REG): New.
+
+2004-09-29 David Edelsohn <edelsohn@gnu.org>
+
+ PR target/17493
+ Backport from mainline
+ 2004-07-16 Segher Boessenkool <segher@kernel.crashing.org>
+ * config/rs6000/eabi.asm (__eabi_convert): Fix typo (cmpi vs. cmpwi).
+
+2004-09-28 Giovanni Bajo <giovannibajo@gcc.gnu.org>
+
+ PR target/14064
+ Backport from mainline
+ 2004-09-01 James E Wilson <wilson@specifixinc.com>
+ * config/avr/avr.c (avr_unique_section): Delete prototype and
+ definition.
+ (TARGET_ASM_UNIQUE_SECTION): Delete.
+
+2004-09-26 Roger Sayle <roger@eyesopen.com>
+
+ PR other/15526
+ Backport from mainline
+ 2004-05-20 Falk Hueffner <falk@debian.org>
+ * libgcc2.c (__mulvsi3): Fix overflow test.
+
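+	A sketch of a correct test (illustrative; the real function lives in
+	libgcc2.c and is written in terms of the word-size macros): widen,
+	multiply, and check that the product survives truncation back to
+	SImode.
+
+	  extern void abort (void);
+	  typedef int SItype __attribute__ ((mode (SI)));
+	  typedef int DItype __attribute__ ((mode (DI)));
+
+	  SItype
+	  __mulvsi3 (SItype a, SItype b)
+	  {
+	    DItype w = (DItype) a * (DItype) b;
+	    if ((SItype) w != w)        /* product does not fit in SImode */
+	      abort ();
+	    return (SItype) w;
+	  }
+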
+2004-09-24 Roger Sayle <roger@eyesopen.com>
+
+ PR rtl-optimization/9771
+ * regclass.c (CALL_REALLY_USED_REGNO_P): New macro to eliminate
+ conditional compilation in init_reg_sets_1.
+ (init_reg_sets_1): Let global_regs[i] take priority over the frame
+ (but not stack) pointer exceptions to regs_invalidated_by_call.
+ (globalize_reg): Globalizing a fixed register may need to update
+ regs_invalidated_by_call.
+
+2004-09-23 Denis Chertykov <denisc@overta.ru>
+
+ PR target/16884
+ * config/avr/avr.md ("movmemhi"): Substitute match_dup to
+ match_scratch.
+ (*movmemqi_insn): Likewise.
+ (*movmemhi): Likewise.
+ (clrmemhi): Likewise.
+ (*clrmemqi): Likewise.
+ (*clrmemhi): Likewise.
+
+2004-09-23 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR bootstrap/17369
+ * Makefile.in (@set_gcc_lib_path@): Added.
+
+ * configure.ac: Include ../config/gcc-lib-path.m4. Use
+ TL_AC_GNU_MAKE_GCC_LIB_PATH.
+ * configure: Regenerated.
+
+2004-09-23 Joseph S. Myers <jsm@polyomino.org.uk>
+
+ PR c/16566
+ * c-typeck.c (build_component_ref): Don't special-case
+ COMPOUND_EXPR.
+
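+	The construct involved, for reference (hypothetical example): a member
+	reference whose object is a comma expression.
+
+	  struct S { int m; } s;
+	  int x;
+	  int f (void) { return (x++, s).m; }
+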
+2004-09-22 Ralf Corsepius <corsepiu@faw.uni-ulm.de>
+
+ PR target/15583
+ * config/rs6000/rtems.h (TARGET_OS_CPP_BUILTINS): Add
+ builtin_define ("__USE_INIT_FINI__").
+
+2004-09-20 Richard Sandiford <rsandifo@redhat.com>
+
+ PR target/17565
+ * config/mips/mips.md (define_asm_attributes): Set can_delay to no.
+
+2004-09-15 James E Wilson <wilson@specifixinc.com>
+
+ PR target/17455
+ * config/ia64/ia64.c (ia64_function_ok_for_sibcall): Return false
+ if current_function_decl is a sibcall.
+
+2004-09-15 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/linux64.h (MD_FALLBACK_FRAME_STATE_FOR): Save
+ location of CR.
+ * config/rs6000/linux.h (MD_FALLBACK_FRAME_STATE_FOR): Ditto.
+
+2004-09-15 Eric Botcazou <ebotcazou@libertysurf.fr>
+
+ * doc/install.texi (sparc-sun-solaris2*): Properly format warning.
+
+2004-09-14 Richard Henderson <rth@redhat.com>
+
+ PR rtl-opt/17186
+ * function.c (expand_function_end): Revert last change.
+
+ * sibcall.c (call_ends_block_p): Fix thinko finding the
+ last real insn in a block.
+
+2004-09-14 Joseph S. Myers <jsm@polyomino.org.uk>
+
+ PR c/15498
+ * doc/invoke.texi (Environment Variables): Correct example locale.
+
+2004-09-14 Daniel Jacobowitz <dan@debian.org>
+
+ * reload.c (find_reloads): Swap operand_loc pointers for
+ find_dummy_reload if we have swapped two operands.
+
+2004-09-13 Richard Henderson <rth@redhat.com>
+
+ PR inline-asm/6806
+ * cselib.c (cselib_invalidate_rtx): Export. Remove unused args.
+ (cselib_invalidate_rtx_note_stores): New.
+ (cselib_record_sets, cselib_process_insn): Update to match.
+ * cselib.h (cselib_invalidate_rtx): Declare.
+ * postreload.c (reload_cse_simplify): Invalidate asm clobbers.
+
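+	Illustrative C (i386 names assumed): the post-reload CSE pass tracks
+	values held in hard registers, so registers named as asm clobbers
+	must be invalidated or a stale copy could be substituted afterwards.
+
+	  int f (int x)
+	  {
+	    int y = x + 1;
+	    asm volatile ("" : : : "eax");  /* %eax no longer holds y */
+	    return y + x;                   /* y must not be taken from the
+	                                       clobbered register */
+	  }
+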
+2004-09-11 Ramana Radhakrishnan <ramana.radhakrishnan@codito.com>
+
+ PR target/11476
+ * config/arc/arc.c (arc_va_args): Call build1 instead
+ of build for unary tree operators.
+
+2004-09-11 Andrew Pinski <apinski@apple.com>
+
+ PR target/17167
+ backport from the mainline:
+ 2004-02-28 Andrew Pinski <pinskia@physics.uc.edu>
+ * config/darwin.h (machopic_finish): Output stub even if
+ the symbol is already defined.
+ 2004-02-27 Dale Johannesen <dalej@apple.com>
+ * config/darwin.c (machopic_output_possible_stub_label):
+ Remove.
+ config/darwin-protos.h: Ditto.
+ config/darwin.h: Remove call to it.
+
+2004-09-11 David Edelsohn <edelsohn@gnu.org>
+
+ PR target/17277
+ * config/rs6000/aix.h (MD_FROB_UPDATE_CONTEXT): Use __64BIT__ to
+ choose 64-bit version.
+
+2004-09-09 Kaz Kojima <kkojima@gcc.gnu.org>
+
+ PR 15886
+ Backport from mainline:
+ 2004-06-11 J"orn Rennecke <joern.rennecke@superh.com>
+
+ * sh.h (ALLOCATE_INITIAL_VALUE): Use return_address_pointer_rtx.
+
+2004-09-09 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/rs6000.c (rs6000_stack_info): Correct alignment of
+ save_size.
+
+2004-09-08 H.J. Lu <hongjiu.lu@intel.com>
+
+ * configure: Regenerated.
+
+2004-09-08 Richard Henderson <rth@redhat.com>
+
+ PR rtl-opt/17186
+ * function.c (expand_function_end): Have fall-off-the-end
+ return path jump around return register setup.
+
+2004-09-08 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR target/14925:
+ * Makefile.in (LIB2ADDEHSTATIC): New.
+ (LIB2ADDEHSHARED): New.
+ (LIBUNWIND): New.
+ (LIBUNWINDDEP): New.
+ (SHLIBUNWIND_LINK): New.
+ (SHLIBUNWIND_INSTALL): New.
+ (libgcc.mk): Pass LIB2ADDEHSTATIC, LIB2ADDEHSHARED, LIBUNWIND,
+ LIBUNWINDDEP, SHLIBUNWIND_LINK and SHLIBUNWIND_INSTALL.
+ (clean): Remove libunwind*.
+ (stage1-start): Remove and copy stage1/libunwind*.
+ (stage2-start): Remove and copy stage2/libunwind*.
+ (stage3-start): Remove and copy stage3/libunwind*.
+ (stage4-start): Remove and copy stage4/libunwind*.
+ (stageprofile-start): Remove and copy stageprofile/libunwind*.
+ (stagefeedback-start): Remove and copy stagefeedback/libunwind*.
+
+ * config.gcc (ia64*-*-linux*): Always add t-libunwind to
+ tmake_file. Add t-libunwind-elf and ia64/t-glibc-libunwind to
+ tmake_file if --with-system-libunwind isn't used.
+
+ * config/ia64/t-glibc-libunwind: New file.
+ * config/t-libunwind-elf: Likewise.
+ * unwind-compat.c: Likewise.
+ * unwind-compat.h: Likewise.
+ * unwind-dw2-fde-compat.c: Likewise.
+
+ * config/ia64/t-glibc (LIB2ADDEH): Updated.
+ * config/ia64/t-hpux (T_CFLAGS): Add -DUSE_LIBUNWIND_EXCEPTIONS.
+
+ * config/ia64/unwind-ia64.c: Include "unwind-compat.h". Define
+ aliases if needed.
+ * unwind-dw2-fde-glibc.c: Likewise.
+ * unwind-dw2.c: Likewise.
+
+ * config/t-libunwind (LIB2ADDEH): Updated.
+ (LIB2ADDEHSTATIC): New.
+ (T_CFLAGS): Add -DUSE_LIBUNWIND_EXCEPTIONS.
+ (TARGET_LIBGCC2_CFLAGS): Set to -DUSE_GAS_SYMVER.
+
+ * configure.ac: Change --enable-libunwind-exceptions to
+ --with-system-libunwind. Don't define USE_LIBUNWIND_EXCEPTIONS.
+ * configure: Regenerated.
+ * config.in: Updated.
+
+ * doc/install.texi (ia64-*-linux): Require libunwind 0.98 or
+ above and mention --with-system-libunwind.
+ (ia64-*-hpux*): Mention --enable-libunwind-exceptions is
+ removed in gcc 3.4.3 and later.
+
+ * gcc.c (init_spec): Add -lunwind to -lgcc_s if
+ USE_LIBUNWIND_EXCEPTIONS is defined.
+
+ * mklibgcc.in: Support libunwind.
+
+2004-09-07 Mark Mitchell <mark@codesourcery.com>
+
+ * version.c (version_string): Restore pre-release marker.
+ * doc/include/gcc-common.texi: Set version to 3.4.3.
+
+2004-09-06 Release Manager
+
+ * GCC 3.4.2 released.
+
+2004-09-05 Mark Mitchell <mark@codesourcery.com>
+
+ PR bootstrap/17325
+ Backport from mainline
+ 2004-06-23 Wu Yongwei <adah@sh163.net>
+ * gthr-win32.h (__GTHREAD_MUTEX_INIT_DEFAULT): Adjust.
+ (__gthr_i486_lock_cmp_xchg): New inline assembly function.
+ (__GTHR_W32_InterlockedCompareExchange): New macro to choose a
+ suitable function for interlocked compare-and-exchange.
+ (__gthread_mutex_trylock): Use
+ __GTHR_W32_InterlockedCompareExchange.
+ (__gthread_mutex_init_function, __gthread_mutex_lock,
+ __gthread_mutex_trylock, __gthread_mutex_unlock): Adjust the
+ initial counter value to work correctly under Windows 95.
+ * config/i386/gthr-win32.c: Adjust include order.
+ Define __GTHREAD_I486_INLINE_LOCK_PRIMITIVES before including
+ gthr-win32.h.
+ (__gthr_win32_mutex_init_function, __gthr_win32_mutex_lock,
+ __gthr_win32_mutex_trylock, __gthr_win32_mutex_unlock): Adjust
+ to match inline versions in gthr-win32.h.
+
+ 2004-04-27 Wu Yongwei <adah@sh163.net>
+ * gthr-win32.h (__gthread_mutex_t): Change typedef to new structure.
+ (__GTHREAD_MUTEX_INIT_DEFAULT): Adjust.
+ (__gthread_mutex_init_function): Replace CreateMutex with
+ initialization of custom mutex using CreateSemaphore.
+ (__gthread_mutex_lock): Use InterlockedIncrement.
+ (__gthread_mutex_trylock): Use InterlockedCompareExchange.
+ (__gthread_mutex_unlock): Use InterlockedDecrement and
+ ReleaseSemaphore to unlock.
+ * config/i386/gthr-win32.c (__gthread_mutex_init_function,
+ __gthread_mutex_lock, __gthread_mutex_trylock,
+ __gthread_mutex_unlock): Adjust to match inline versions in
+ gthr-win32.h.
+
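+	A condensed sketch of the scheme (names and types simplified; the
+	real code is the inline set in gthr-win32.h):
+
+	  typedef struct {
+	    long counter;            /* -1 while the mutex is free */
+	    void *sema;              /* handle from CreateSemaphore */
+	  } mutex_t;
+
+	  void lock (mutex_t *m)
+	  {
+	    if (InterlockedIncrement (&m->counter) != 0)
+	      WaitForSingleObject (m->sema, INFINITE);    /* contended */
+	  }
+
+	  void unlock (mutex_t *m)
+	  {
+	    if (InterlockedDecrement (&m->counter) >= 0)
+	      ReleaseSemaphore (m->sema, 1, NULL);        /* wake one waiter */
+	  }
+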
+2004-09-03 Kaz Kojima <kkojima@gcc.gnu.org>
+
+ PR target/17303
+ Backport from mainline:
+ 2004-08-31 Kaz Kojima <kkojima@gcc.gnu.org>
+
+ * config/sh/sh.c (output_branch): Check the insn length possibly
+ in the delay slot.
+
+2004-09-01 Zdenek Dvorak <rakdver@atrey.karlin.mff.cuni.cz>
+
+ PR rtl-optimization/16408
+ * gcse.c (replace_store_insn): Fix LIBCALL/RETVAL notes.
+
+2004-09-01 Richard Henderson <rth@redhat.com>
+
+ * config/ns32k/ns32k.h (TRANSFER_FROM_TRAMPOLINE): Remove.
+ (TRAMPOLINE_TEMPLATE): Merge code from __trampoline inline.
+
+2004-09-01 Jakub Jelinek <jakub@redhat.com>
+
+ * libgcc-std.ver (GCC_3.4.2): Export also __trampoline_setup.
+
+2004-09-01 Jakub Jelinek <jakub@redhat.com>
+
+ * fold-const.c (operand_equal_p): Require equal sign also for
+ FIX_{CEIL,TRUNC,FLOOR,ROUND}_EXPR.
+
+2004-08-31 Jeff Law <law@redhat.com>
+
+ * gcse.c (remove_reachable_equiv_notes): Scan loops where
+ the store expression is killed for REG_EQUAL/REG_EQUIV notes
+ that need to be removed.
+
+2004-08-30 Mark Mitchell <mark@codesourcery.com>
+
+ Revert:
+ 2004-08-29 Mark Mitchell <mark@codesourcery.com>
+ PR rtl-optimization/16590
+ * gcse.c (pre_delete): Do not create invalid REG_EQUAL notes.
+
+2004-08-29 Mark Mitchell <mark@codesourcery.com>
+
+ PR rtl-optimization/16590
+ * gcse.c (pre_delete): Do not create invalid REG_EQUAL notes.
+
+2004-08-26 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/alpha.h (HARD_REGNO_MODE_OK): Allow complex float
+ modes.
+
+2004-08-26 Kazuhiro Inaoka <inaoka.kazuhiro@renesas.com>
+
+ PR target/17119.
+ * config/m32r.c (gen_compare): Use reg_or_int16_operand when
+ checking for a valid constant, regardless of sign.
+
+2004-08-25 Richard Henderson <rth@redhat.com>
+
+ PR target/16974
+ * config/alpha/alpha.md (adddi_fp_hack): Don't rely on splitting
+ if the constant satisfies add_operand.
+
+2004-08-25 Richard Henderson <rth@redhat.com>
+
+ PR debug/10695
+ * config/alpha/alpha.c (emit_frame_store_1, emit_frame_store): New.
+ (alpha_expand_prologue): Handle >32-bit frames. Generate proper
+ unwind info for >16-bit frames.
+
+2004-08-25 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/alpha.c (alpha_cannot_force_const_mem): New.
+ (TARGET_CANNOT_FORCE_CONST_MEM): New.
+
+2004-08-25 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/15927
+ * arm.h (THUMB_SECONDARY_OUTPUT_RELOAD_CLASS): Don't need a secondary
+ reload if CLASS is BASE_REGS.
+
+2004-08-25 Richard Earnshaw <rearnsha@arm.com>
+
+ PR target/15948
+ * arm.md (bicsi3_cbranch): Add alternative to handle tying operands
+ one and two.
+
+2004-08-24 Richard Henderson <rth@redhat.com>
+
+ PR target/16298
+ * config/i386/i386.c (legitimate_constant_p): Rework to not accept
+ random codes within CONST.
+
+2004-08-24 Paolo Bonzini <bonzini@gnu.org>
+
+ PR target/17113
+ * config/i386/i386.md (movv16qi_internal): Fix typo.
+
+2004-08-24 Kazuhiro Inaoka <inaoka.kazuhiro@renesas.com>
+
+ PR target/17093
+ * config/m32r/m32r.md (movsi_sda): Add SI mode specification to
+ UNSPEC.
+
+2004-08-24 Jonathan Wakely <redi@gcc.gnu.org>
+
+ * doc/trouble.texi (C++ misunderstandings): Fix example code.
+
+2004-08-23 Roger Sayle <roger@eyesopen.com>
+
+ PR rtl-optimization/17078
+ * c-semantics.c (expand_unreachable_if_stmt): If the end of the
+ specified statement list is reachable, always return a non-NULL
+ pointer. If necessary, return error_mark_node.
+
+2004-08-23 Mark Mitchell <mark@codesourcery.com>
+
+ PR c/14492
+ * dwarf2out.c (loc_descriptor_from_tree): Robustify.
+
+ PR c/16180
+ * jump.c (duplicate_loop_exit_test): If the location reached by
+ the unconditional jump at the top of the loop is outside the loop,
+ then do not treat it as the exit test.
+
+2004-08-23 Janis Johnson <janis187@us.ibm.com>
+
+ Backports from mainline:
+
+ 2004-02-11 Ziemowit Laski <zlaski@apple.com>
+ 2004-02-21 Ziemowit Laski <zlaski@apple.com>
+ 2004-02-27 Ziemowit Laski <zlaski@apple.com>
+ 2004-03-04 Ziemowit Laski <zlaski@apple.com>
+ 2004-03-20 Ziemowit Laski <zlaski@apple.com>
+ 2004-03-24 Ziemowit Laski <zlaski@apple.com>
+ 2004-05-11 Fariborz Jahanian <fjahanian@apple.com>
+ 2004-07-23 Janis Johnson <janis187@us.ibm.com>
+ 2004-08-12 Janis Johnson <janis187@us.ibm.com>
+ 2004-08-12 Ben Elliston <bje@au.ibm.com>
+ 2004-08-16 Janis Johnson <janis187@us.ibm.com>
+
+ * c-common.c (vector_size_helper): Remove; call
+ reconstruct_complex_type() instead.
+ * hooks.c (hook_constcharptr_tree_null): New hook.
+ * hooks.h (hook_constcharptr_tree_null): New prototype.
+ * target-def.h (TARGET_MANGLE_FUNDAMENTAL_TYPE): New target hook.
+ * target.h (mangle_fundamental_type): New target hook.
+ * tree.c (reconstruct_complex_type): New function
+ (formerly vector_size_helper() in c-common.c).
+ (make_vector): Make externally visible.
+ * tree.h (reconstruct_complex_type, make_vector): Add prototypes.
+ * doc/extend.texi (AltiVec builtins): Document additional differences
+ from the Motorola AltiVec PIM.
+ * doc/tm.texi (TARGET_MANGLE_FUNDAMENTAL_TYPE): Document.
+ * config/darwin.h (TARGET_OPTION_TRANSLATE_TABLE): Refer to
+ SUBTARGET_OPTION_TRANSLATE_TABLE for architecture-specific options.
+ * config/i386/darwin.h (SUBTARGET_OPTION_TRANSLATE_TABLE): Define it.
+ * config/rs6000/altivec.h: #error out if '-maltivec' not specified.
+ (vector, pixel, bool): #define to __vector, __pixel and __bool.
+ Change vector to __vector (except for the `vector' macro itself).
+ (__un_args_eq, __bin_args_eq, __tern_args_eq): Move to C-specific
+ portion of header.
+ (__altivec_link_error_invalid_argument): Remove prototype; will use
+ __builtin_altivec_compiletime_error("vec_*") instead.
+ (vec_*): Fix/complete set of available operation overloads given the
+ existence of distinct 'vector bool ...' and 'vector pixel' types;
+ tighten cv-correctness of pointer arguments; in C, always check for
+ correct argument types before macro expansion.
+ (vec_splat_s8, vec_splat_s16, vec_splat_s32, vec_splat_u8,
+ vec_splat_u16, vec_splat_u32): Change C++ definitions to accept a
+ 'const int' argument; the prototypes already do.
+ (vec_dst, vec_dstst, vec_dststt, vec_dstt, vec_sld, vec_splat): Add
+ prototypes, marked with always_inline attribute.
+ * config/rs6000/darwin.h (SUBTARGET_OPTION_TRANSLATE_TABLE): New macro
+ defining Darwin/PowerPC-specific '-f[no-]altivec' and
+ '-W[no-]altivec-long-deprecated' switches.
+ * config/rs6000/rs6000-c.c (rs6000_cpu_cpp_builtins): Pre-define
+ '__vector', '__pixel' and '__bool' macros using
+ '__attribute__((altivec(...)))' types.
+ * config/rs6000/rs6000.c (bool_char_type_node, bool_short_type_node,
+ bool_int_type_node, pixel_type_node, bool_V16QI_type_node,
+ bool_V8HI_type_node, bool_V4SI_type_node, pixel_V8HI_type_node):
+ New type nodes.
+ (rs6000_warn_altivec_long, rs6000_warn_altivec_long_switch): New, for
+ handling '-W[no-]altivec-long-deprecated'.
+ (rs6000_override_options): Handle '-W[no-]altivec-long-deprecated'.
+ (rs6000_expand_binop_builtin, rs6000_expand_ternop_builtin,
+ altivec_expand_dst_builtin): Remove casts from integer literals.
+ (altivec_expand_builtin): Likewise; handle expansion of new
+ '__builtin_altivec_compiletime_error' function.
+ (rs6000_init_builtins): Initialize 'vector bool ...' and 'vector pixel'
+ types, and make them distinct from other vector types; register
+ '__builtin_altivec_compiletime_error' function.
+ (print_operand): For 'P', print a full target register name instead of
+ merely its number.
+ (rs6000_attribute_table): Add "altivec" attribute.
+ (rs6000_handle_altivec_attribute): New function.
+ (rs6000_common_init_builtins): Rename v4si_ftype_char, v8hi_ftype_char,
+ v16qi_ftype_char, v4sf_ftype_v4si_char, v4si_ftype_v4sf_char,
+ v4si_ftype_v4si_char, v8hi_ftype_v8hi_char, v16qi_ftype_v16qi_char,
+ v16qi_ftype_v16qi_v16qi_char, v8hi_ftype_v8hi_v8hi_char,
+ v4si_ftype_v4si_v4si_char and v4sf_ftype_v4sf_v4sf_char to
+ end in ..._int; change them to accept an int instead of a char
+ as the last parameter.
+ (altivec_expand_dst_builtin): Treat expansion as completed even if
+ literal argument is invalid (so that other expansions are not tried
+ in vain).
+ (TARGET_MANGLE_FUNDAMENTAL_TYPE): Point target hook at
+ rs6000_mangle_fundamental_type.
+ (rs6000_mangle_fundamental_type): New function.
+ * config/rs6000/rs6000.h (TARGET_OPTIONS): Describe
+ '-m[no-]warn-altivec-long' (which '-W[no-]altivec-long-deprecated'
+ maps to).
+ (rs6000_warn_altivec_long, rs6000_warn_altivec_long_switch): Forward
+ declare.
+ (ALTIVEC_BUILTIN_COMPILETIME_ERROR): New built-in enumeration.
+
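+	Typical use of the reworked header (illustrative; requires -maltivec):
+
+	  #include <altivec.h>
+
+	  vector signed int
+	  add4 (vector signed int a, vector signed int b)
+	  {
+	    return vec_add (a, b);    /* overload picked from operand types */
+	  }
+
+	  vector bool int
+	  gt4 (vector signed int a, vector signed int b)
+	  {
+	    return vec_cmpgt (a, b);  /* 'vector bool int' is now a distinct
+	                                 type, not an alias of the above */
+	  }
+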
+2004-08-20 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/16195
+ * config/rs6000/rs6000.h (CONDITIONAL_REGISTER_USAGE): Make r30
+ fixed if TARGET_TOC && TARGET_MINIMAL_TOC.
+ * config/rs6000/rs6000.c (first_reg_to_save): Pretend
+ call_used_regs[30] is 0 if TARGET_TOC && TARGET_MINIMAL_TOC.
+ (rs6000_emit_prologue, rs6000_emit_epilogue): Likewise.
+
+2004-08-17 Joseph S. Myers <jsm@polyomino.org.uk>
+
+ * c-decl.c (grokdeclarator): Allow for function definition where
+ innermost declarator has attributes.
+
+2004-08-17 Daniel Bornstein <danfuzz@milk.com>
+
+ PR target/17019
+ * arm.md (addsi3_cbranch_scratch): Correct case labels.
+
+2004-08-14 Roger Sayle <roger@eyesopen.com>
+ David Billinghurst <David.Billinghurst@riotinto.com>
+
+ PR libgfortran/15930
+ * fixinc/inclhack.def (irix___generic1, irix___generic2): New.
+ * fixinc/fixincl.x: Regenerate.
+ * fixinc/tests/base/internal/math_core.h: New file.
+
+2004-08-14 Roger Sayle <roger@eyesopen.com>
+ Bruce Korb <bkorb@gnu.org>
+
+ * fixinc/inclhack.def (aix_syswait_2): New fix.
+ * fixinc/fixincl.x: Regenerate.
+ * fixinc/tests/base/sys/wait.h: Update for new test.
+
+2004-08-14 Gerald Pfeifer <gerald@pfeifer.com>
+ Dimitri Papadopoulos-Orfanos <papadopo@shfj.cea.fr>
+ Dave Korn <dk@artimi.com>
+
+ * doc/install.texi (Building): Avoid duplicate reference to GNU
+ make requirement.
+ (*-*-solaris2*): Do not recommend GNU make any longer. Simplify.
+
+2004-08-13 J"orn Rennecke <joern.rennecke@superh.com>
+
+ * sh.md (cbranch define_delay): Use cond_delay_slot for
+ non-annulled condition too.
+
+2004-08-11 Mark Mitchell <mark@codesourcery.com>
+
+ PR c++/16618
+ * ginclude/stddef.h (offsetof): Refine C++ definition.
+
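+	For reference, the classic C definition that the C++ variant refines
+	(illustrative; the installed header's C++ branch is more elaborate):
+
+	  #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *) 0)->MEMBER)
+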
+2004-08-12 Alan Modra <amodra@bigpond.net.au>
+
+ Merge from mainline.
+ 2004-07-01 Alan Modra <amodra@bigpond.net.au>
+ * config/rs6000/rs6000.c (rs6000_mixed_function_arg): Rewrite.
+ (function_arg): Use rs6000_arg_size rather than CLASS_MAX_NREGS in
+ calculating gpr size for altivec. Simplify and correct
+ rs6000_mixed_function_arg calls. Call rs6000_mixed_function_arg
+ for ABI_V4 gpr case too. Fix off-by-one error in long double
+ reg test. Generate the correct PARALLEL to handle long double
+ for ABI_AIX 32-bit. Use this for -m32 -mpowerpc64 fpr case too.
+ (function_arg_partial_nregs): Align before calculating regs left.
+ Don't return info on partial fprs when we need info on gprs.
+ Correct long double fpr off-by-one error.
+
+2004-08-11 James E Wilson <wilson@specifixinc.com>
+
+ PR rtl-optimization/16490
+ * cfgrtl.c (cfg_layout_split_edge): Set global_live_at_start and
+ global_live_at_end for new basic block if already set for other blocks.
+
+2004-08-07 Roger Sayle <roger@eyesopen.com>
+
+ PR middle-end/16790
+ * fold-const.c (extract_muldiv_1) <NOP_EXPR>: Disallow local
+ truncations, not just global truncations.
+
+2004-08-07 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/rs6000.c (function_arg_padding): Revert 2004-07-28.
+
+2004-08-04 Zack Weinberg <zack@codesourcery.com>
+
+ PR 13956
+ * dbxout.c (dbx_output_lbrac, dbx_output_rbrac): New
+ functions, broken out of dbxout_block.
+ (dbxout_block): The block at depth 0 should get LBRAC/RBRAC
+ stabs too, if it contains symbols. Use the begin_label and
+ the Lscope label (emitted by dbxout_function_end) for the
+ range of this block.
+ (dbxout_function_decl): Always call dbxout_function_end.
+ (dbxout_function_end): Return after emitting the "Lscope" symbol
+ under the same conditions that this function formerly wasn't called.
+ Add explanatory comments.
+
+2004-08-03 Yossi Markovich <yossim@il.ibm.com>
+ Mostafa Hagog <mustafa@il.ibm.com>
+
+ * config/rs6000/altivec.md (altivec_vnmsubfp): Fix wrong pattern.
+
+2004-07-31 Andrew Pinski <apinski@apple.com>
+
+ PR other/16842
+ * mkheaders.in (libsubdir): s/gcc-lib/gcc/.
+
+2004-07-28 Aldy Hernandez <aldyh@redhat.com>
+
+ * config/rs6000/rs6000.md ("move_from_CR_gt_bit"): Rename to
+ move_from_CR_eq_bit.
+ Rename UNSPEC_MV_CR_GT to UNSPEC_MV_CR_EQ.
+
+ * config/rs6000/spe.md ("e500_flip_gt_bit"): Rename to
+ e500_flip_eq_bit.
+
+ * config/rs6000/rs6000-protos.h: Rename output_e500_flip_gt_bit to
+ output_e500_flip_eq_bit.
+
+ * config/rs6000/rs6000.c (output_e500_flip_gt_bit): Rename to
+ output_e500_flip_eq_bit.
+ (rs6000_emit_sCOND): Rename call to output_e500_flip_gt_bit to
+ output_e500_flip_eq_bit.
+ Rename gen_move_from_CR_gt_bit to gen_move_from_CR_eq_bit.
+ (print_operand): case D. Get to EQ bit.
+
+2004-07-28 Alan Modra <amodra@bigpond.net.au>
+
+ * config/rs6000/rs6000.c (function_arg_padding): Pad SFmode upwards.
+
2004-07-24 Alexander Kabaev <kan@freebsd.org>

	* config/ia64/ia64.h (SUBTARGET_EXTRA_SPECS): Default to nothing.
diff --git a/contrib/gcc/Makefile.in b/contrib/gcc/Makefile.in
index d0ec7688e672..30815af41866 100644
--- a/contrib/gcc/Makefile.in
+++ b/contrib/gcc/Makefile.in
@@ -505,8 +505,16 @@ CRTSTUFF_CFLAGS = -O2 $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -g0 \
# Additional sources to handle exceptions; overridden by targets as needed.
LIB2ADDEH = $(srcdir)/unwind-dw2.c $(srcdir)/unwind-dw2-fde.c \
$(srcdir)/unwind-sjlj.c $(srcdir)/gthr-gnat.c $(srcdir)/unwind-c.c
+LIB2ADDEHSTATIC = $(LIB2ADDEH)
+LIB2ADDEHSHARED = $(LIB2ADDEH)
LIB2ADDEHDEP = unwind.inc unwind-dw2-fde.h
+# Don't build libunwind by default.
+LIBUNWIND =
+LIBUNWINDDEP =
+SHLIBUNWIND_LINK =
+SHLIBUNWIND_INSTALL =
+
# nm flags to list global symbols in libgcc object files.
SHLIB_NM_FLAGS = -pg
@@ -545,6 +553,10 @@ LIB2FUNCS_EXTRA =
# Assembler files should have names ending in `.asm'.
LIB2FUNCS_STATIC_EXTRA =
+# List of extra C and assembler files to add to shared libgcc2.
+# Assembler files should have names ending in `.asm'.
+LIB2FUNCS_SHARED_EXTRA =
+
# Program to convert libraries.
LIBCONVERT =
@@ -938,6 +950,9 @@ LIB2_DIVMOD_FUNCS = _divdi3 _moddi3 _udivdi3 _umoddi3 _udiv_w_sdiv _udivmoddi4
# language hooks, generated by configure
@language_hooks@
+# Set up library path if needed.
+@set_gcc_lib_path@
+
# per-language makefile fragments
ifneq ($(LANG_MAKEFRAGS),)
include $(LANG_MAKEFRAGS)
@@ -1133,16 +1148,25 @@ xlimits.h: glimits.h limitx.h limity.h
LIB2ADD = $(LIB2FUNCS_EXTRA)
LIB2ADD_ST = $(LIB2FUNCS_STATIC_EXTRA)
+LIB2ADD_SH = $(LIB2FUNCS_SHARED_EXTRA)
-libgcc.mk: config.status Makefile mklibgcc $(LIB2ADD) $(LIB2ADD_ST) xgcc$(exeext) specs
+libgcc.mk: config.status Makefile mklibgcc $(LIB2ADD) $(LIB2ADD_ST) $(LIB2ADD_SH) \
+ xgcc$(exeext) specs
objext='$(objext)' \
LIB1ASMFUNCS='$(LIB1ASMFUNCS)' \
LIB2FUNCS_ST='$(LIB2FUNCS_ST)' \
LIBGCOV='$(LIBGCOV)' \
LIB2ADD='$(LIB2ADD)' \
LIB2ADD_ST='$(LIB2ADD_ST)' \
+ LIB2ADD_SH='$(LIB2ADD_SH)' \
LIB2ADDEH='$(LIB2ADDEH)' \
+ LIB2ADDEHSTATIC='$(LIB2ADDEHSTATIC)' \
+ LIB2ADDEHSHARED='$(LIB2ADDEHSHARED)' \
LIB2ADDEHDEP='$(LIB2ADDEHDEP)' \
+ LIBUNWIND='$(LIBUNWIND)' \
+ LIBUNWINDDEP='$(LIBUNWINDDEP)' \
+ SHLIBUNWIND_LINK='$(SHLIBUNWIND_LINK)' \
+ SHLIBUNWIND_INSTALL='$(SHLIBUNWIND_INSTALL)' \
FPBIT='$(FPBIT)' \
FPBIT_FUNCS='$(FPBIT_FUNCS)' \
LIB2_DIVMOD_FUNCS='$(LIB2_DIVMOD_FUNCS)' \
@@ -1170,8 +1194,8 @@ LIBGCC_DEPS = $(GCC_PASSES) $(LANGUAGES) stmp-int-hdrs $(STMP_FIXPROTO) \
libgcc.mk $(srcdir)/libgcc2.c $(srcdir)/libgcov.c $(TCONFIG_H) \
$(MACHMODE_H) longlong.h gbl-ctors.h config.status stmp-int-hdrs \
tsystem.h $(FPBIT) $(DPBIT) $(TPBIT) $(LIB2ADD) \
- $(LIB2ADD_ST) $(LIB2ADDEH) $(LIB2ADDEHDEP) $(EXTRA_PARTS) \
- $(srcdir)/config/$(LIB1ASMSRC) \
+ $(LIB2ADD_ST) $(LIB2ADD_SH) $(LIB2ADDEH) $(LIB2ADDEHDEP) \
+ $(EXTRA_PARTS) $(srcdir)/config/$(LIB1ASMSRC) \
$(srcdir)/gcov-io.h $(srcdir)/gcov-io.c gcov-iov.h
libgcov.a: libgcc.a; @true
@@ -1578,7 +1602,7 @@ expr.o : expr.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) f
except.h reload.h $(GGC_H) langhooks.h intl.h $(TM_P_H) real.h $(TARGET_H)
dojump.o : dojump.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) \
flags.h function.h $(EXPR_H) $(OPTABS_H) $(INSN_ATTR_H) insn-config.h \
- langhooks.h
+ langhooks.h $(GGC_H) gt-dojump.h
builtins.o : builtins.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H)\
flags.h $(TARGET_H) function.h $(REGS_H) $(EXPR_H) $(OPTABS_H) insn-config.h \
$(RECOG_H) output.h typeclass.h hard-reg-set.h toplev.h hard-reg-set.h \
@@ -1727,7 +1751,8 @@ dominance.o : dominance.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
et-forest.o : et-forest.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) et-forest.h alloc-pool.h
combine.o : combine.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) flags.h \
function.h insn-config.h $(INSN_ATTR_H) $(REGS_H) $(EXPR_H) \
- $(BASIC_BLOCK_H) $(RECOG_H) real.h hard-reg-set.h toplev.h $(TM_P_H) $(TREE_H) $(TARGET_H)
+ $(BASIC_BLOCK_H) $(RECOG_H) real.h hard-reg-set.h toplev.h $(TM_P_H) \
+ $(TREE_H) $(TARGET_H) $(PARAMS_H)
regclass.o : regclass.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
hard-reg-set.h flags.h $(BASIC_BLOCK_H) $(REGS_H) insn-config.h $(RECOG_H) reload.h \
real.h toplev.h function.h output.h $(GGC_H) $(TM_P_H) $(EXPR_H) $(TIMEVAR_H)
@@ -2060,6 +2085,7 @@ GTFILES = $(srcdir)/input.h $(srcdir)/coretypes.h $(srcdir)/cpplib.h \
$(srcdir)/c-common.h $(srcdir)/c-tree.h \
$(srcdir)/alias.c $(srcdir)/bitmap.c $(srcdir)/cselib.c $(srcdir)/cgraph.c \
$(srcdir)/dbxout.c $(srcdir)/dwarf2out.c $(srcdir)/dwarf2asm.c \
+ $(srcdir)/dojump.c \
$(srcdir)/emit-rtl.c $(srcdir)/except.c $(srcdir)/explow.c $(srcdir)/expr.c \
$(srcdir)/fold-const.c $(srcdir)/function.c \
$(srcdir)/gcse.c $(srcdir)/integrate.c $(srcdir)/lists.c $(srcdir)/optabs.c \
@@ -2079,7 +2105,7 @@ gt-cgraph.h gt-coverage.h gtype-desc.h gtype-desc.c gt-except.h \
gt-function.h gt-integrate.h gt-stmt.h gt-tree.h gt-varasm.h \
gt-emit-rtl.h gt-explow.h gt-stor-layout.h gt-regclass.h \
gt-lists.h gt-alias.h gt-cselib.h gt-fold-const.h gt-gcse.h \
-gt-expr.h gt-sdbout.h gt-optabs.h gt-bitmap.h \
+gt-expr.h gt-sdbout.h gt-optabs.h gt-bitmap.h gt-dojump.h \
gt-dwarf2out.h gt-ra-build.h gt-reg-stack.h gt-dwarf2asm.h \
gt-dbxout.h gt-c-common.h gt-c-decl.h gt-c-parse.h \
gt-c-pragma.h gtype-c.h gt-input.h gt-cfglayout.h \
@@ -2779,7 +2805,8 @@ mostlyclean: lang.mostlyclean
# that don't exist in the distribution.
clean: mostlyclean lang.clean
-rm -f libgcc.a libgcc_eh.a libgcov.a
- -rm -f libgcc_s$(SHLIB_EXT) libgcc_s$(SHLIB_EXT).1
+ -rm -f libgcc_s*
+ -rm -f libunwind*
-rm -f config.h tconfig.h bconfig.h tm_p.h tm.h
-rm -f cs-*
-rm -rf libgcc
@@ -3606,6 +3633,7 @@ stage1-start:
-if [ -f collect-ld$(exeext) ] ; then (cd stage1 && $(LN_S) ../collect-ld$(exeext) .) ; else true ; fi
-rm -f stage1/libgcc.a stage1/libgcc_eh.a stage1/libgcov.a
-rm -f stage1/libgcc_s*$(SHLIB_EXT)
+ -rm -f stage1/libunwind.a stage1/libunwind*$(SHLIB_EXT)
-cp libgcc.a stage1
-$(RANLIB_FOR_TARGET) stage1/libgcc.a
-cp libgcov.a stage1
@@ -3614,6 +3642,7 @@ stage1-start:
$(RANLIB_FOR_TARGET) stage1/libgcc_eh.a; \
fi
-cp libgcc_s*$(SHLIB_EXT) stage1
+ -cp libunwind.a libunwind*$(SHLIB_EXT) stage1
-for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
cp stage1/$${f} . ; \
else true; \
@@ -3641,6 +3670,7 @@ stage2-start:
-if [ -f collect-ld$(exeext) ] ; then (cd stage2 && $(LN_S) ../collect-ld$(exeext) .) ; else true ; fi
-rm -f stage2/libgcc.a stage2/libgcov.a stage2/libgcc_eh.a
-rm -f stage2/libgcc_s*$(SHLIB_EXT)
+ -rm -f stage2/libunwind.a stage2/libunwind*$(SHLIB_EXT)
-cp libgcc.a stage2
-$(RANLIB_FOR_TARGET) stage2/libgcc.a
-cp libgcov.a stage2
@@ -3649,6 +3679,7 @@ stage2-start:
$(RANLIB_FOR_TARGET) stage2/libgcc_eh.a; \
fi
-cp libgcc_s*$(SHLIB_EXT) stage2
+ -cp libunwind.a libunwind*$(SHLIB_EXT) stage2
-for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
cp stage2/$${f} . ; \
else true; \
@@ -3672,6 +3703,7 @@ stage3-start:
-if [ -f collect-ld$(exeext) ] ; then (cd stage3 && $(LN_S) ../collect-ld$(exeext) .) ; else true ; fi
-rm -f stage3/libgcc.a stage3/libgcov.a stage3/libgcc_eh.a
-rm -f stage3/libgcc_s*$(SHLIB_EXT)
+ -rm -f stage3/libunwind.a stage3/libunwind*$(SHLIB_EXT)
-cp libgcc.a stage3
-$(RANLIB_FOR_TARGET) stage3/libgcc.a
-cp libgcov.a stage3
@@ -3680,6 +3712,7 @@ stage3-start:
$(RANLIB_FOR_TARGET) stage3/libgcc_eh.a; \
fi
-cp libgcc_s*$(SHLIB_EXT) stage3
+ -cp libunwind.a libunwind*$(SHLIB_EXT) stage3
-for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
cp stage3/$${f} . ; \
else true; \
@@ -3703,6 +3736,7 @@ stage4-start:
-if [ -f collect-ld$(exeext) ] ; then (cd stage4 && $(LN_S) ../collect-ld$(exeext) .) ; else true ; fi
-rm -f stage4/libgcc.a stage4/libgcov.a stage4/libgcc_eh.a
-rm -f stage4/libgcc_s*$(SHLIB_EXT)
+ -rm -f stage4/libunwind.a stage4/libunwind*$(SHLIB_EXT)
-cp libgcc.a stage4
-$(RANLIB_FOR_TARGET) stage4/libgcc.a
-cp libgcov.a stage4
@@ -3711,6 +3745,7 @@ stage4-start:
$(RANLIB_FOR_TARGET) stage4/libgcc_eh.a; \
fi
-cp libgcc_s*$(SHLIB_EXT) stage4
+ -cp libunwind.a libunwind*$(SHLIB_EXT) stage4
-for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
cp stage4/$${f} . ; \
else true; \
@@ -3732,6 +3767,7 @@ stageprofile-start:
-if [ -f collect-ld$(exeext) ] ; then (cd stageprofile && $(LN_S) ../collect-ld$(exeext) .) ; else true ; fi
-rm -f stageprofile/libgcc.a stageprofile/libgcov.a stageprofile/libgcc_eh.a
-rm -f stageprofile/libgcc_s*$(SHLIB_EXT)
+ -rm -f stageprofile/libunwind.a stageprofile/libunwind*$(SHLIB_EXT)
-cp libgcc.a stageprofile
-$(RANLIB_FOR_TARGET) stageprofile/libgcc.a
-cp libgcov.a stageprofile
@@ -3740,6 +3776,7 @@ stageprofile-start:
$(RANLIB_FOR_TARGET) stageprofile/libgcc_eh.a; \
fi
-cp libgcc_s*$(SHLIB_EXT) stageprofile
+ -cp libunwind.a libunwind*$(SHLIB_EXT) stageprofile
-for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
cp stageprofile/$${f} . ; \
else true; \
@@ -3761,6 +3798,7 @@ stagefeedback-start:
-if [ -f collect-ld$(exeext) ] ; then (cd stagefeedback && $(LN_S) ../collect-ld$(exeext) .) ; else true ; fi
-rm -f stagefeedback/libgcc.a stagefeedback/libgcov.a stagefeedback/libgcc_eh.a
-rm -f stagefeedback/libgcc_s*$(SHLIB_EXT)
+ -rm -f stagefeedback/libunwind.a stagefeedback/libunwind*$(SHLIB_EXT)
-rm -f *.da
-for dir in fixinc po testsuite $(SUBDIRS); \
do \
@@ -3774,6 +3812,7 @@ stagefeedback-start:
$(RANLIB_FOR_TARGET) stagefeedback/libgcc_eh.a; \
fi
-cp libgcc_s*$(SHLIB_EXT) stagefeedback
+ -cp libunwind.a libunwind*$(SHLIB_EXT) stagefeedback
-for f in .. $(EXTRA_MULTILIB_PARTS); do if [ x$${f} != x.. ]; then \
cp stagefeedback/$${f} . ; \
else true; \
diff --git a/contrib/gcc/attribs.c b/contrib/gcc/attribs.c
index a40fea7e7610..3286bf309dfa 100644
--- a/contrib/gcc/attribs.c
+++ b/contrib/gcc/attribs.c
@@ -266,6 +266,8 @@ decl_attributes (tree *node, tree attributes, int flags)
/* Force a recalculation of mode and size. */
DECL_MODE (*node) = VOIDmode;
DECL_SIZE (*node) = 0;
+ if (!DECL_USER_ALIGN (*node))
+ DECL_ALIGN (*node) = 0;
layout_decl (*node, 0);
}
diff --git a/contrib/gcc/builtins.c b/contrib/gcc/builtins.c
index dadb6cd41a0e..a3e069e4baca 100644
--- a/contrib/gcc/builtins.c
+++ b/contrib/gcc/builtins.c
@@ -1708,6 +1708,7 @@ expand_builtin_mathfn (tree exp, rtx target, rtx subtarget)
narg = save_expr (arg);
if (narg != arg)
{
+ arg = narg;
arglist = build_tree_list (NULL_TREE, arg);
exp = build_function_call_expr (fndecl, arglist);
}
@@ -1840,6 +1841,7 @@ expand_builtin_mathfn_2 (tree exp, rtx target, rtx subtarget)
narg = save_expr (arg1);
if (narg != arg1)
{
+ arg1 = narg;
temp = build_tree_list (NULL_TREE, narg);
stable = false;
}
@@ -1849,6 +1851,7 @@ expand_builtin_mathfn_2 (tree exp, rtx target, rtx subtarget)
narg = save_expr (arg0);
if (narg != arg0)
{
+ arg0 = narg;
arglist = tree_cons (NULL_TREE, narg, temp);
stable = false;
}
@@ -6581,7 +6584,7 @@ fold_builtin (tree exp)
return build_function_call_expr (expfn, arglist);
}
- /* Optimize sqrt(pow(x,y)) = pow(x,y*0.5). */
+ /* Optimize sqrt(pow(x,y)) = pow(|x|,y*0.5). */
if (flag_unsafe_math_optimizations
&& (fcode == BUILT_IN_POW
|| fcode == BUILT_IN_POWF
@@ -6590,8 +6593,11 @@ fold_builtin (tree exp)
tree powfn = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
tree arg0 = TREE_VALUE (TREE_OPERAND (arg, 1));
tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg, 1)));
- tree narg1 = fold (build (MULT_EXPR, type, arg1,
- build_real (type, dconsthalf)));
+ tree narg1;
+ if (!tree_expr_nonnegative_p (arg0))
+ arg0 = build1 (ABS_EXPR, type, arg0);
+ narg1 = fold (build (MULT_EXPR, type, arg1,
+ build_real (type, dconsthalf)));
arglist = tree_cons (NULL_TREE, arg0,
build_tree_list (NULL_TREE, narg1));
return build_function_call_expr (powfn, arglist);
diff --git a/contrib/gcc/c-common.c b/contrib/gcc/c-common.c
index c5f4dadbaa0c..d79f2000250f 100644
--- a/contrib/gcc/c-common.c
+++ b/contrib/gcc/c-common.c
@@ -767,7 +767,6 @@ static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *);
static tree handle_cleanup_attribute (tree *, tree, tree, int, bool *);
static tree handle_warn_unused_result_attribute (tree *, tree, tree, int,
bool *);
-static tree vector_size_helper (tree, tree);
static void check_function_nonnull (tree, tree);
static void check_nonnull_arg (void *, tree, unsigned HOST_WIDE_INT);
@@ -1138,7 +1137,7 @@ fname_decl (unsigned int rid, tree id)
input_line = saved_lineno;
}
if (!ix && !current_function_decl)
- pedwarn ("%J'%D' is not defined outside of function scope", decl, decl);
+ pedwarn ("'%D' is not defined outside of function scope", decl);
return decl;
}
@@ -4643,7 +4642,10 @@ handle_mode_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
else
for (j = 0; j < NUM_MACHINE_MODES; j++)
if (!strcmp (p, GET_MODE_NAME (j)))
- mode = (enum machine_mode) j;
+ {
+ mode = (enum machine_mode) j;
+ break;
+ }
if (mode == VOIDmode)
error ("unknown machine mode `%s'", p);
@@ -4676,8 +4678,44 @@ handle_mode_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
mode);
*node = ptr_type;
}
+ else if (TREE_CODE (type) == ENUMERAL_TYPE)
+ {
+ /* For enumeral types, copy the precision from the integer
+ type returned above. If not an INTEGER_TYPE, we can't use
+ this mode for this type. */
+ if (TREE_CODE (typefm) != INTEGER_TYPE)
+ {
+ error ("cannot use mode %qs for enumeral types", p);
+ return NULL_TREE;
+ }
+
+ if (!(flags & (int) ATTR_FLAG_TYPE_IN_PLACE))
+ type = build_type_copy (type);
+
+ /* We cannot use layout_type here, because that will attempt
+ to re-layout all variants, corrupting our original. */
+ TYPE_PRECISION (type) = TYPE_PRECISION (typefm);
+ TYPE_MIN_VALUE (type) = TYPE_MIN_VALUE (typefm);
+ TYPE_MAX_VALUE (type) = TYPE_MAX_VALUE (typefm);
+ TYPE_SIZE (type) = TYPE_SIZE (typefm);
+ TYPE_SIZE_UNIT (type) = TYPE_SIZE_UNIT (typefm);
+ TYPE_MODE (type) = TYPE_MODE (typefm);
+ if (!TYPE_USER_ALIGN (type))
+ TYPE_ALIGN (type) = TYPE_ALIGN (typefm);
+
+ *node = type;
+ }
+ else if (VECTOR_MODE_P (mode)
+ ? TREE_CODE (type) != TREE_CODE (TREE_TYPE (typefm))
+ : TREE_CODE (type) != TREE_CODE (typefm))
+ {
+ error ("mode `%s' applied to inappropriate type", p);
+ return NULL_TREE;
+ }
else
- *node = typefm;
+ *node = typefm;
+
/* No need to layout the type here. The caller should do this. */
}
}
@@ -5246,57 +5284,11 @@ handle_vector_size_attribute (tree *node, tree name, tree args,
}
/* Build back pointers if needed. */
- *node = vector_size_helper (*node, new_type);
+ *node = reconstruct_complex_type (*node, new_type);
return NULL_TREE;
}
-/* HACK. GROSS. This is absolutely disgusting. I wish there was a
- better way.
-
- If we requested a pointer to a vector, build up the pointers that
- we stripped off while looking for the inner type. Similarly for
- return values from functions.
-
- The argument "type" is the top of the chain, and "bottom" is the
- new type which we will point to. */
-
-static tree
-vector_size_helper (tree type, tree bottom)
-{
- tree inner, outer;
-
- if (POINTER_TYPE_P (type))
- {
- inner = vector_size_helper (TREE_TYPE (type), bottom);
- outer = build_pointer_type (inner);
- }
- else if (TREE_CODE (type) == ARRAY_TYPE)
- {
- inner = vector_size_helper (TREE_TYPE (type), bottom);
- outer = build_array_type (inner, TYPE_DOMAIN (type));
- }
- else if (TREE_CODE (type) == FUNCTION_TYPE)
- {
- inner = vector_size_helper (TREE_TYPE (type), bottom);
- outer = build_function_type (inner, TYPE_ARG_TYPES (type));
- }
- else if (TREE_CODE (type) == METHOD_TYPE)
- {
- inner = vector_size_helper (TREE_TYPE (type), bottom);
- outer = build_method_type_directly (TYPE_METHOD_BASETYPE (type),
- inner,
- TYPE_ARG_TYPES (type));
- }
- else
- return bottom;
-
- TREE_READONLY (outer) = TREE_READONLY (type);
- TREE_THIS_VOLATILE (outer) = TREE_THIS_VOLATILE (type);
-
- return outer;
-}
-
/* Handle the "nonnull" attribute. */
static tree
handle_nonnull_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
diff --git a/contrib/gcc/c-decl.c b/contrib/gcc/c-decl.c
index 6e87a7662baa..619b221063b9 100644
--- a/contrib/gcc/c-decl.c
+++ b/contrib/gcc/c-decl.c
@@ -3620,10 +3620,6 @@ grokdeclarator (tree declarator, tree declspecs,
}
}
- /* Check the type and width of a bit-field. */
- if (bitfield)
- check_bitfield_type_and_width (&type, width, orig_name);
-
/* Figure out the type qualifiers for the declaration. There are
two ways a declaration can become qualified. One is something
like `const int i' where the `const' is explicit. Another is
@@ -3983,7 +3979,17 @@ grokdeclarator (tree declarator, tree declspecs,
}
else if (TREE_CODE (declarator) == CALL_EXPR)
{
+ /* Say it's a definition only for the declarator closest to
+ the identifier, apart possibly from some attributes. */
+ bool really_funcdef = false;
tree arg_types;
+ if (funcdef_flag)
+ {
+ tree t = TREE_OPERAND (declarator, 0);
+ while (TREE_CODE (t) == TREE_LIST)
+ t = TREE_VALUE (t);
+ really_funcdef = (TREE_CODE (t) == IDENTIFIER_NODE);
+ }
/* Declaring a function type.
Make sure we have a valid type for the function to return. */
@@ -4009,11 +4015,7 @@ grokdeclarator (tree declarator, tree declspecs,
inner layer of declarator. */
arg_types = grokparms (TREE_OPERAND (declarator, 1),
- funcdef_flag
- /* Say it's a definition
- only for the CALL_EXPR
- closest to the identifier. */
- && TREE_CODE (TREE_OPERAND (declarator, 0)) == IDENTIFIER_NODE);
+ really_funcdef);
/* Type qualifiers before the return type of the function
qualify the return type, not the function type. */
if (type_quals)
@@ -4127,6 +4129,10 @@ grokdeclarator (tree declarator, tree declspecs,
/* Now TYPE has the actual type. */
+ /* Check the type and width of a bit-field. */
+ if (bitfield)
+ check_bitfield_type_and_width (&type, width, orig_name);
+
/* Did array size calculations overflow? */
if (TREE_CODE (type) == ARRAY_TYPE
@@ -5126,7 +5132,7 @@ finish_struct (tree t, tree fieldlist, tree attributes)
make it one, warn and turn off the flag. */
if (TREE_CODE (t) == UNION_TYPE
&& TYPE_TRANSPARENT_UNION (t)
- && TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))
+ && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
{
TYPE_TRANSPARENT_UNION (t) = 0;
warning ("union cannot be made transparent");
@@ -5278,9 +5284,19 @@ finish_enum (tree enumtype, tree values, tree attributes)
TYPE_MIN_VALUE (enumtype) = minnode;
TYPE_MAX_VALUE (enumtype) = maxnode;
- TYPE_PRECISION (enumtype) = precision;
TREE_UNSIGNED (enumtype) = unsign;
TYPE_SIZE (enumtype) = 0;
+
+ /* If the precision of the type was specified with an attribute and it
+ was too small, give an error. Otherwise, use it. */
+ if (TYPE_PRECISION (enumtype))
+ {
+ if (precision > TYPE_PRECISION (enumtype))
+ error ("specified mode too small for enumeral values");
+ }
+ else
+ TYPE_PRECISION (enumtype) = precision;
+
layout_type (enumtype);
if (values != error_mark_node)
diff --git a/contrib/gcc/c-format.c b/contrib/gcc/c-format.c
index a532259750c4..620277ff604a 100644
--- a/contrib/gcc/c-format.c
+++ b/contrib/gcc/c-format.c
@@ -2518,9 +2518,27 @@ init_dynamic_asm_fprintf_info (void)
length modifier to work, one must have issued: "typedef
HOST_WIDE_INT __gcc_host_wide_int__;" in one's source code
prior to using that modifier. */
- if (!(hwi = maybe_get_identifier ("__gcc_host_wide_int__"))
- || !(hwi = DECL_ORIGINAL_TYPE (identifier_global_value (hwi))))
+ hwi = maybe_get_identifier ("__gcc_host_wide_int__");
+ if (!hwi)
+ {
+ error ("'__gcc_host_wide_int__' is not defined as a type");
+ return;
+ }
+ hwi = identifier_global_value (hwi);
+ if (!hwi || TREE_CODE (hwi) != TYPE_DECL)
+ {
+ error ("'__gcc_host_wide_int__' is not defined as a type");
+ return;
+ }
+ hwi = DECL_ORIGINAL_TYPE (hwi);
+ if (!hwi)
abort ();
+ if (hwi != long_integer_type_node && hwi != long_long_integer_type_node)
+ {
+ error ("'__gcc_host_wide_int__' is not defined as 'long'"
+ " or 'long long'");
+ return;
+ }
/* Create a new (writable) copy of asm_fprintf_length_specs. */
new_asm_fprintf_length_specs = xmemdup (asm_fprintf_length_specs,
@@ -2563,19 +2581,71 @@ init_dynamic_diag_info (void)
However we don't force a hard ICE because we may see only one
or the other type. */
if ((loc = maybe_get_identifier ("location_t")))
- loc = TREE_TYPE (identifier_global_value (loc));
+ {
+ loc = identifier_global_value (loc);
+ if (loc)
+ {
+ if (TREE_CODE (loc) != TYPE_DECL)
+ {
+ error ("'location_t' is not defined as a type");
+ loc = 0;
+ }
+ else
+ loc = TREE_TYPE (loc);
+ }
+ }
/* We need to grab the underlying `union tree_node' so peek into
an extra type level. */
if ((t = maybe_get_identifier ("tree")))
- t = TREE_TYPE (TREE_TYPE (identifier_global_value (t)));
+ {
+ t = identifier_global_value (t);
+ if (t)
+ {
+ if (TREE_CODE (t) != TYPE_DECL)
+ {
+ error ("'tree' is not defined as a type");
+ t = 0;
+ }
+ else if (TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE)
+ {
+ error ("'tree' is not defined as a pointer type");
+ t = 0;
+ }
+ else
+ t = TREE_TYPE (TREE_TYPE (t));
+ }
+ }
/* Find the underlying type for HOST_WIDE_INT. For the %w
length modifier to work, one must have issued: "typedef
HOST_WIDE_INT __gcc_host_wide_int__;" in one's source code
prior to using that modifier. */
if ((hwi = maybe_get_identifier ("__gcc_host_wide_int__")))
- hwi = DECL_ORIGINAL_TYPE (identifier_global_value (hwi));
+ {
+ hwi = identifier_global_value (hwi);
+ if (hwi)
+ {
+ if (TREE_CODE (hwi) != TYPE_DECL)
+ {
+ error ("'__gcc_host_wide_int__' is not defined as a type");
+ hwi = 0;
+ }
+ else
+ {
+ hwi = DECL_ORIGINAL_TYPE (hwi);
+ if (!hwi)
+ abort ();
+ if (hwi != long_integer_type_node
+ && hwi != long_long_integer_type_node)
+ {
+ error ("'__gcc_host_wide_int__' is not defined"
+ " as 'long' or 'long long'");
+ hwi = 0;
+ }
+ }
+ }
+ }
/* Assign the new data for use. */
diff --git a/contrib/gcc/c-opts.c b/contrib/gcc/c-opts.c
index 93637a2f2a84..2a617447cfdb 100644
--- a/contrib/gcc/c-opts.c
+++ b/contrib/gcc/c-opts.c
@@ -186,10 +186,10 @@ defer_opt (enum opt_code code, const char *arg)
/* Common initialization before parsing options. */
unsigned int
-c_common_init_options (unsigned int argc, const char **argv ATTRIBUTE_UNUSED)
+c_common_init_options (unsigned int argc, const char **argv)
{
static const unsigned int lang_flags[] = {CL_C, CL_ObjC, CL_CXX, CL_ObjCXX};
- unsigned int result;
+ unsigned int i, result;
/* This is conditionalized only because that is the way the front
ends used to do it. Maybe this should be unconditional? */
@@ -222,17 +222,25 @@ c_common_init_options (unsigned int argc, const char **argv ATTRIBUTE_UNUSED)
result = lang_flags[c_language];
- /* If potentially preprocessing Fortran we have to accept its front
- end options since the driver passes most of them through. */
-#ifdef CL_F77
- if (c_language == clk_c && argc > 2
- && !strcmp (argv[2], "-traditional-cpp" ))
+ if (c_language == clk_c)
{
- permit_fortran_options = true;
- result |= CL_F77;
- }
+ for (i = 1; i < argc; i++)
+ {
+ /* If preprocessing assembly language, accept any of the C-family
+ front end options since the driver may pass them through. */
+ if (! strcmp (argv[i], "-lang-asm"))
+ result |= CL_C | CL_ObjC | CL_CXX | CL_ObjCXX;
+#ifdef CL_F77
+ /* If potentially preprocessing Fortran we have to accept its
+ front end options since the driver may pass them through. */
+ else if (! strcmp (argv[i], "-traditional-cpp"))
+ {
+ permit_fortran_options = true;
+ result |= CL_F77;
+ }
#endif
-
+ }
+ }
return result;
}
@@ -1160,8 +1168,12 @@ c_common_post_options (const char **pfilename)
*pfilename = this_input_filename
= cpp_read_main_file (parse_in, in_fnames[0]);
+ /* Don't do any compilation or preprocessing if there is no input file. */
if (this_input_filename == NULL)
- return true;
+ {
+ errorcount++;
+ return false;
+ }
if (flag_working_directory
&& flag_preprocess_only && ! flag_no_line_commands)
@@ -1350,11 +1362,13 @@ sanitize_cpp_opts (void)
/* Disable -dD, -dN and -dI if normal output is suppressed. Allow
-dM since at least glibc relies on -M -dM to work. */
+ /* Also, flag_no_output implies flag_no_line_commands, always. */
if (flag_no_output)
{
if (flag_dump_macros != 'M')
flag_dump_macros = 0;
flag_dump_includes = 0;
+ flag_no_line_commands = 1;
}
cpp_opts->unsigned_char = !flag_signed_char;
diff --git a/contrib/gcc/c-parse.in b/contrib/gcc/c-parse.in
index f03526607bd6..66d27ac0322f 100644
--- a/contrib/gcc/c-parse.in
+++ b/contrib/gcc/c-parse.in
@@ -2149,7 +2149,7 @@ compstmt_contents_nonempty:
compstmt_primary_start:
'(' '{'
- { if (current_function_decl == 0)
+ { if (last_tree == NULL)
{
error ("braced-group within expression allowed only inside a function");
YYERROR;
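The construct being guarded is GNU C's braced group (statement expression); a small example that is accepted inside a function body but must draw the error above at file scope:

    /* Statement expressions are a GNU extension; this compiles with gcc.
       Moving the ({ ... }) into a file-scope initializer triggers the
       "braced-group within expression" error.  */
    static int
    twice (int n)
    {
      return ({ int t = n; t * 2; });
    }

    int
    main (void)
    {
      return twice (2) == 4 ? 0 : 1;
    }
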
diff --git a/contrib/gcc/c-ppoutput.c b/contrib/gcc/c-ppoutput.c
index 5588da3d863d..8162ccba26b3 100644
--- a/contrib/gcc/c-ppoutput.c
+++ b/contrib/gcc/c-ppoutput.c
@@ -359,7 +359,7 @@ pp_file_change (const struct line_map *map)
{
const char *flags = "";
- if (flag_no_line_commands || flag_no_output)
+ if (flag_no_line_commands)
return;
if (map != NULL)
diff --git a/contrib/gcc/c-semantics.c b/contrib/gcc/c-semantics.c
index 38c4021d6cf1..692adfcde26b 100644
--- a/contrib/gcc/c-semantics.c
+++ b/contrib/gcc/c-semantics.c
@@ -937,6 +937,9 @@ expand_unreachable_if_stmt (tree t)
return true;
}
+ /* Account for declarations as conditions. */
+ expand_cond (IF_COND (t));
+
if (THEN_CLAUSE (t) && ELSE_CLAUSE (t))
{
n = expand_unreachable_stmt (THEN_CLAUSE (t), 0);
@@ -969,7 +972,9 @@ expand_unreachable_if_stmt (tree t)
/* Expand an unreachable statement list. This function skips all
statements preceding the first potentially reachable label and
then returns the label (or, in some cases, the statement after
- one containing the label). */
+ one containing the label). This function returns NULL_TREE if
+ the end of the given statement list is unreachable, and a
+ non-NULL value, possibly error_mark_node, otherwise. */
static tree
expand_unreachable_stmt (tree t, int warn)
{
@@ -1019,7 +1024,7 @@ expand_unreachable_stmt (tree t, int warn)
case IF_STMT:
if (expand_unreachable_if_stmt (t))
- return TREE_CHAIN (t);
+ return TREE_CHAIN (t) ? TREE_CHAIN (t) : error_mark_node;
break;
case WHILE_STMT:
@@ -1027,7 +1032,7 @@ expand_unreachable_stmt (tree t, int warn)
no need to rotate the loop, instead the WHILE_STMT can be
expanded like a DO_STMT. */
genrtl_do_stmt_1 (WHILE_COND (t), WHILE_BODY (t));
- return TREE_CHAIN (t);
+ return TREE_CHAIN (t) ? TREE_CHAIN (t) : error_mark_node;
case COMPOUND_STMT:
{
@@ -1036,7 +1041,7 @@ expand_unreachable_stmt (tree t, int warn)
if (n != NULL_TREE)
{
expand_stmt (n);
- return TREE_CHAIN (t);
+ return TREE_CHAIN (t) ? TREE_CHAIN (t) : error_mark_node;
}
warn = false;
break;
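The revised contract uses error_mark_node as a "reachable, but no statement follows" sentinel, distinct from NULL_TREE meaning the end is unreachable. A minimal sketch of that convention with hypothetical stand-in types:

    #include <stdio.h>

    /* NULL models "end of list unreachable"; a distinguished sentinel
       models "reachable but nothing follows" (error_mark_node above).  */
    struct stmt { struct stmt *chain; };

    static struct stmt sentinel;

    static struct stmt *
    next_reachable (struct stmt *s, int reachable)
    {
      if (!reachable)
        return NULL;
      return s->chain ? s->chain : &sentinel;
    }

    int
    main (void)
    {
      struct stmt last = { NULL };
      printf ("%d\n", next_reachable (&last, 1) == &sentinel);   /* 1 */
      return 0;
    }
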
diff --git a/contrib/gcc/c-typeck.c b/contrib/gcc/c-typeck.c
index c8d71e128f6b..c9479f08d127 100644
--- a/contrib/gcc/c-typeck.c
+++ b/contrib/gcc/c-typeck.c
@@ -647,7 +647,7 @@ same_translation_unit_p (tree t1, tree t2)
while (t2 && TREE_CODE (t2) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t2)))
{
- case 'd': t2 = DECL_CONTEXT (t1); break;
+ case 'd': t2 = DECL_CONTEXT (t2); break;
case 't': t2 = TYPE_CONTEXT (t2); break;
case 'b': t2 = BLOCK_SUPERCONTEXT (t2); break;
default: abort ();
@@ -1320,26 +1320,6 @@ build_component_ref (tree datum, tree component)
tree field = NULL;
tree ref;
- /* If DATUM is a COMPOUND_EXPR, move our reference inside it.
- If pedantic ensure that the arguments are not lvalues; otherwise,
- if the component is an array, it would wrongly decay to a pointer in
- C89 mode.
- We cannot do this with a COND_EXPR, because in a conditional expression
- the default promotions are applied to both sides, and this would yield
- the wrong type of the result; for example, if the components have
- type "char". */
- switch (TREE_CODE (datum))
- {
- case COMPOUND_EXPR:
- {
- tree value = build_component_ref (TREE_OPERAND (datum, 1), component);
- return build (COMPOUND_EXPR, TREE_TYPE (value),
- TREE_OPERAND (datum, 0), pedantic_non_lvalue (value));
- }
- default:
- break;
- }
-
/* See if there is a field or component with name COMPONENT. */
if (code == RECORD_TYPE || code == UNION_TYPE)
@@ -4117,18 +4097,32 @@ digest_init (tree type, tree init, int require_constant)
/* Build a VECTOR_CST from a *constant* vector constructor. If the
vector constructor is not constant (e.g. {1,2,3,foo()}) then punt
below and handle as a constructor. */
- if (code == VECTOR_TYPE
- && comptypes (TREE_TYPE (inside_init), type, COMPARE_STRICT)
- && TREE_CONSTANT (inside_init))
- {
- if (TREE_CODE (inside_init) == VECTOR_CST
- && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
- TYPE_MAIN_VARIANT (type),
- COMPARE_STRICT))
- return inside_init;
- else
- return build_vector (type, CONSTRUCTOR_ELTS (inside_init));
- }
+ if (code == VECTOR_TYPE
+ && comptypes (TREE_TYPE (inside_init), type, COMPARE_STRICT)
+ && TREE_CONSTANT (inside_init))
+ {
+ if (TREE_CODE (inside_init) == VECTOR_CST
+ && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
+ TYPE_MAIN_VARIANT (type),
+ COMPARE_STRICT))
+ return inside_init;
+
+ if (TREE_CODE (inside_init) == CONSTRUCTOR)
+ {
+ tree link;
+
+ /* Iterate through elements and check if all constructor
+ elements are *_CSTs. */
+ for (link = CONSTRUCTOR_ELTS (inside_init);
+ link;
+ link = TREE_CHAIN (link))
+ if (TREE_CODE_CLASS (TREE_CODE (TREE_VALUE (link))) != 'c')
+ break;
+
+ if (link == NULL)
+ return build_vector (type, CONSTRUCTOR_ELTS (inside_init));
+ }
+ }
/* Any type can be initialized
from an expression of the same type, optionally with braces. */
@@ -6571,6 +6565,14 @@ c_finish_case (void)
{
struct c_switch *cs = switch_stack;
+ /* If we've not seen any case labels (or a default), we may still
+ need to chain any statements that were seen as the SWITCH_BODY. */
+ if (SWITCH_BODY (cs->switch_stmt) == NULL)
+ {
+ SWITCH_BODY (cs->switch_stmt) = TREE_CHAIN (cs->switch_stmt);
+ TREE_CHAIN (cs->switch_stmt) = NULL_TREE;
+ }
+
/* Rechain the next statements to the SWITCH_STMT. */
last_tree = cs->switch_stmt;
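The digest_init hunk above accepts a vector constructor only after verifying that every element is a *_CST. The scan-until-mismatch idiom it uses, sketched with a hypothetical element type:

    #include <stddef.h>
    #include <stdio.h>

    /* "Is a constant" is modelled by a flag on each list element.  */
    struct elt { int is_constant; struct elt *next; };

    static int
    all_constant (struct elt *list)
    {
      struct elt *link;

      for (link = list; link; link = link->next)
        if (!link->is_constant)
          break;
      return link == NULL;   /* fell off the end: every element passed */
    }

    int
    main (void)
    {
      struct elt c = { 1, NULL }, b = { 0, &c }, a = { 1, &b };
      printf ("%d %d\n", all_constant (&a), all_constant (&c));   /* 0 1 */
      return 0;
    }
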
diff --git a/contrib/gcc/c.opt b/contrib/gcc/c.opt
index 2a2ff85b7380..7160dabbc55d 100644
--- a/contrib/gcc/c.opt
+++ b/contrib/gcc/c.opt
@@ -405,7 +405,7 @@ Give strings the type \"array of char\"
ansi
C ObjC C++ ObjC++
-A synonym for -std=c89. In a future version of GCC it will become synonymous with -std=c99 instead
+A synonym for -std=c89 (for C) or -std=c++98 (for C++).
d
C ObjC C++ ObjC++ Joined
@@ -788,7 +788,7 @@ Deprecated in favor of -std=gnu99
std=iso9899:1990
C ObjC
-Deprecated in favor of -std=c89
+Conform to the ISO 1990 C standard
std=iso9899:199409
C ObjC
@@ -796,11 +796,11 @@ Conform to the ISO 1990 C standard as amended in 1994
std=iso9899:1999
C ObjC
-Deprecated in favor of -std=c99
+Conform to the ISO 1999 C standard
std=iso9899:199x
C ObjC
-Deprecated in favor of -std=c99
+Deprecated in favor of -std=iso9899:1999
traditional-cpp
C ObjC C++ ObjC++
diff --git a/contrib/gcc/calls.c b/contrib/gcc/calls.c
index 5dc96c684fd3..86e51842cf5a 100644
--- a/contrib/gcc/calls.c
+++ b/contrib/gcc/calls.c
@@ -1719,8 +1719,8 @@ load_register_parameters (struct arg_data *args, int num_actuals,
use_group_regs (call_fusage, reg);
else if (nregs == -1)
use_reg (call_fusage, reg);
- else
- use_regs (call_fusage, REGNO (reg), nregs == 0 ? 1 : nregs);
+ else if (nregs > 0)
+ use_regs (call_fusage, REGNO (reg), nregs);
}
}
}
@@ -2730,10 +2730,14 @@ expand_call (tree exp, rtx target, int ignore)
Also, do all pending adjustments now if there is any chance
this might be a call to alloca or if we are expanding a sibling
call sequence or if we are calling a function that is to return
- with stack pointer depressed. */
+ with stack pointer depressed.
+ Also do the adjustments before a throwing call, otherwise
+ exception handling can fail; PR 19225. */
if (pending_stack_adjust >= 32
|| (pending_stack_adjust > 0
&& (flags & (ECF_MAY_BE_ALLOCA | ECF_SP_DEPRESSED)))
+ || (pending_stack_adjust > 0
+ && flag_exceptions && !(flags & ECF_NOTHROW))
|| pass == 0)
do_pending_stack_adjust ();
diff --git a/contrib/gcc/cfglayout.c b/contrib/gcc/cfglayout.c
index 4794ee129d62..fa1b82c15d57 100644
--- a/contrib/gcc/cfglayout.c
+++ b/contrib/gcc/cfglayout.c
@@ -865,6 +865,18 @@ fixup_fallthru_exit_predecessor (void)
{
basic_block c = ENTRY_BLOCK_PTR->next_bb;
+ /* If the very first block is the one with the fall-through exit
+ edge, we have to split that block. */
+ if (c == bb)
+ {
+ bb = split_block (bb, NULL)->dest;
+ cfg_layout_initialize_rbi (bb);
+ bb->rbi->next = c->rbi->next;
+ c->rbi->next = bb;
+ bb->rbi->footer = c->rbi->footer;
+ c->rbi->footer = NULL;
+ }
+
while (c->rbi->next != bb)
c = c->rbi->next;
diff --git a/contrib/gcc/cfgrtl.c b/contrib/gcc/cfgrtl.c
index bad9e77b5d5a..42899f4e3833 100644
--- a/contrib/gcc/cfgrtl.c
+++ b/contrib/gcc/cfgrtl.c
@@ -483,9 +483,21 @@ rtl_split_block (basic_block bb, void *insnp)
edge e;
rtx insn = insnp;
- /* There is no point splitting the block after its end. */
- if (BB_END (bb) == insn)
- return 0;
+ if (!insn)
+ {
+ insn = first_insn_after_basic_block_note (bb);
+
+ if (insn)
+ insn = PREV_INSN (insn);
+ else
+ insn = get_last_insn ();
+ }
+
+ /* We probably should check the type of the insn so that we do not
+ create an inconsistent cfg. It is checked in verify_flow_info
+ anyway, so do not bother. */
+ if (insn == BB_END (bb))
+ emit_note_after (NOTE_INSN_DELETED, insn);
/* Create the new basic block. */
new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
@@ -2711,6 +2723,18 @@ cfg_layout_split_edge (edge e)
new_bb->count = e->count;
new_bb->frequency = EDGE_FREQUENCY (e);
+ /* ??? This info is likely going to be out of date very soon, but we must
+ create it to avoid getting an ICE later. */
+ if (e->dest->global_live_at_start)
+ {
+ new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ COPY_REG_SET (new_bb->global_live_at_start,
+ e->dest->global_live_at_start);
+ COPY_REG_SET (new_bb->global_live_at_end,
+ e->dest->global_live_at_start);
+ }
+
new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU);
new_e->probability = REG_BR_PROB_BASE;
new_e->count = e->count;
diff --git a/contrib/gcc/collect2.c b/contrib/gcc/collect2.c
index 4fbe3a420032..33206f5cf2ff 100644
--- a/contrib/gcc/collect2.c
+++ b/contrib/gcc/collect2.c
@@ -189,6 +189,7 @@ static int strip_flag; /* true if -s */
#ifdef COLLECT_EXPORT_LIST
static int export_flag; /* true if -bE */
static int aix64_flag; /* true if -b64 */
+static int aixrtl_flag; /* true if -brtl */
#endif
int debug; /* true if -debug */
@@ -246,7 +247,6 @@ static struct path_prefix cmdline_lib_dirs; /* directories specified with -L */
static struct path_prefix libpath_lib_dirs; /* directories in LIBPATH */
static struct path_prefix *libpaths[3] = {&cmdline_lib_dirs,
&libpath_lib_dirs, NULL};
-static const char *const libexts[3] = {"a", "so", NULL}; /* possible library extensions */
#endif
static void handler (int);
@@ -1080,6 +1080,8 @@ main (int argc, char **argv)
export_flag = 1;
else if (arg[2] == '6' && arg[3] == '4')
aix64_flag = 1;
+ else if (arg[2] == 'r' && arg[3] == 't' && arg[4] == 'l')
+ aixrtl_flag = 1;
break;
#endif
@@ -2823,6 +2825,8 @@ resolve_lib_name (const char *name)
{
char *lib_buf;
int i, j, l = 0;
+ /* Library extensions for AIX dynamic linking. */
+ const char * const libexts[2] = {"a", "so"};
for (i = 0; libpaths[i]; i++)
if (libpaths[i]->max_len > l)
@@ -2841,14 +2845,15 @@ resolve_lib_name (const char *name)
const char *p = "";
if (list->prefix[strlen(list->prefix)-1] != '/')
p = "/";
- for (j = 0; libexts[j]; j++)
+ for (j = 0; j < 2; j++)
{
sprintf (lib_buf, "%s%slib%s.%s",
- list->prefix, p, name, libexts[j]);
-if (debug) fprintf (stderr, "searching for: %s\n", lib_buf);
+ list->prefix, p, name,
+ libexts[(j + aixrtl_flag) % 2]);
+ if (debug) fprintf (stderr, "searching for: %s\n", lib_buf);
if (file_exists (lib_buf))
{
-if (debug) fprintf (stderr, "found: %s\n", lib_buf);
+ if (debug) fprintf (stderr, "found: %s\n", lib_buf);
return (lib_buf);
}
}
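The index rotation libexts[(j + aixrtl_flag) % 2] lets -brtl flip the preferred extension from "a" to "so" without duplicating the loop. A runnable sketch of the search order it produces:

    #include <stdio.h>

    int
    main (void)
    {
      static const char *const libexts[2] = { "a", "so" };
      int aixrtl_flag, j;

      for (aixrtl_flag = 0; aixrtl_flag < 2; aixrtl_flag++)
        for (j = 0; j < 2; j++)
          printf ("-brtl=%d try %d: lib*.%s\n",
                  aixrtl_flag, j, libexts[(j + aixrtl_flag) % 2]);
      return 0;
    }
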
diff --git a/contrib/gcc/combine.c b/contrib/gcc/combine.c
index 4d06322d1e17..0a3f3816f999 100644
--- a/contrib/gcc/combine.c
+++ b/contrib/gcc/combine.c
@@ -90,6 +90,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "real.h"
#include "toplev.h"
#include "target.h"
+#include "params.h"
#ifndef SHIFT_COUNT_TRUNCATED
#define SHIFT_COUNT_TRUNCATED 0
@@ -3417,10 +3418,10 @@ subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
/* If this is a register being set, ignore it. */
new = XEXP (x, i);
if (in_dest
- && (code == SUBREG || code == STRICT_LOW_PART
- || code == ZERO_EXTRACT)
&& i == 0
- && GET_CODE (new) == REG)
+ && (((code == SUBREG || code == ZERO_EXTRACT)
+ && GET_CODE (new) == REG)
+ || code == STRICT_LOW_PART))
;
else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
@@ -3715,27 +3716,28 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int last,
temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
break;
case '<':
- {
- enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
- if (cmp_mode == VOIDmode)
- {
- cmp_mode = GET_MODE (XEXP (x, 1));
- if (cmp_mode == VOIDmode)
- cmp_mode = op0_mode;
- }
- temp = simplify_relational_operation (code, cmp_mode,
- XEXP (x, 0), XEXP (x, 1));
- }
-#ifdef FLOAT_STORE_FLAG_VALUE
- if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if (! VECTOR_MODE_P (mode))
{
- if (temp == const0_rtx)
- temp = CONST0_RTX (mode);
- else
- temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
- mode);
- }
+ enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
+ if (cmp_mode == VOIDmode)
+ {
+ cmp_mode = GET_MODE (XEXP (x, 1));
+ if (cmp_mode == VOIDmode)
+ cmp_mode = op0_mode;
+ }
+ temp = simplify_relational_operation (code, cmp_mode,
+ XEXP (x, 0), XEXP (x, 1));
+#ifdef FLOAT_STORE_FLAG_VALUE
+ if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ if (temp == const0_rtx)
+ temp = CONST0_RTX (mode);
+ else
+ temp = CONST_DOUBLE_FROM_REAL_VALUE
+ (FLOAT_STORE_FLAG_VALUE (mode), mode);
+ }
#endif
+ }
break;
case 'c':
case '2':
@@ -10019,13 +10021,8 @@ gen_lowpart_for_combine (enum machine_mode mode, rtx x)
result = gen_lowpart_common (mode, x);
#ifdef CANNOT_CHANGE_MODE_CLASS
- if (result != 0
- && GET_CODE (result) == SUBREG
- && GET_CODE (SUBREG_REG (result)) == REG
- && REGNO (SUBREG_REG (result)) >= FIRST_PSEUDO_REGISTER)
- bitmap_set_bit (&subregs_of_mode, REGNO (SUBREG_REG (result))
- * MAX_MACHINE_MODE
- + GET_MODE (result));
+ if (result != 0 && GET_CODE (result) == SUBREG)
+ record_subregs_of_mode (result);
#endif
if (result)
@@ -10692,34 +10689,61 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
break;
case SUBREG:
- /* Check for the case where we are comparing A - C1 with C2,
- both constants are smaller than 1/2 the maximum positive
- value in MODE, and the comparison is equality or unsigned.
- In that case, if A is either zero-extended to MODE or has
- sufficient sign bits so that the high-order bit in MODE
- is a copy of the sign in the inner mode, we can prove that it is
- safe to do the operation in the wider mode. This simplifies
- many range checks. */
+ /* Check for the case where we are comparing A - C1 with C2, that is
+
+ (subreg:MODE (plus (A) (-C1))) op (C2)
+
+ with C1 a constant, and try to lift the SUBREG, i.e. to do the
+ comparison in the wider mode. One of the following two conditions
+ must be true in order for this to be valid:
+
+ 1. The mode extension results in the same bit pattern being added
+ on both sides and the comparison is equality or unsigned. As
+ C2 has been truncated to fit in MODE, the pattern can only be
+ all 0s or all 1s.
+
+ 2. The mode extension results in the sign bit being copied on
+ each side.
+
+ The difficulty here is that we have predicates for A but not for
+ (A - C1) so we need to check that C1 is within proper bounds so
+ as to perturb A as little as possible. */
if (mode_width <= HOST_BITS_PER_WIDE_INT
&& subreg_lowpart_p (op0)
+ && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
&& GET_CODE (SUBREG_REG (op0)) == PLUS
- && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT
- && INTVAL (XEXP (SUBREG_REG (op0), 1)) < 0
- && (-INTVAL (XEXP (SUBREG_REG (op0), 1))
- < (HOST_WIDE_INT) (GET_MODE_MASK (mode) / 2))
- && (unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode) / 2
- && (0 == (nonzero_bits (XEXP (SUBREG_REG (op0), 0),
- GET_MODE (SUBREG_REG (op0)))
- & ~GET_MODE_MASK (mode))
- || (num_sign_bit_copies (XEXP (SUBREG_REG (op0), 0),
- GET_MODE (SUBREG_REG (op0)))
- > (unsigned int)
- (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
- - GET_MODE_BITSIZE (mode)))))
- {
- op0 = SUBREG_REG (op0);
- continue;
+ && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT)
+ {
+ enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
+ rtx a = XEXP (SUBREG_REG (op0), 0);
+ HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
+
+ if ((c1 > 0
+ && (unsigned HOST_WIDE_INT) c1
+ < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
+ && (equality_comparison_p || unsigned_comparison_p)
+ /* (A - C1) zero-extends if it is positive and sign-extends
+ if it is negative, C2 both zero- and sign-extends. */
+ && ((0 == (nonzero_bits (a, inner_mode)
+ & ~GET_MODE_MASK (mode))
+ && const_op >= 0)
+ /* (A - C1) sign-extends if it is positive and 1-extends
+ if it is negative, C2 both sign- and 1-extends. */
+ || (num_sign_bit_copies (a, inner_mode)
+ > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ - mode_width)
+ && const_op < 0)))
+ || ((unsigned HOST_WIDE_INT) c1
+ < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
+ /* (A - C1) always sign-extends, like C2. */
+ && num_sign_bit_copies (a, inner_mode)
+ > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ - mode_width - 1)))
+ {
+ op0 = SUBREG_REG (op0);
+ continue;
+ }
}
/* If the inner mode is narrower and we are extracting the low part,
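A concrete instance of case 1 above, checked exhaustively: A fits the narrow (QImode-sized) mode, C1 and C2 are small positive constants, the comparison is unsigned, and narrowing (A - C1) never changes the outcome. The values are assumptions for illustration, not taken from the patch:

    #include <stdio.h>

    int
    main (void)
    {
      unsigned a, c1 = 10, c2 = 20, mismatches = 0;

      for (a = 0; a < 256; a++)   /* A already fits in 8 bits */
        if (((unsigned char) (a - c1) < c2) != (a - c1 < c2))
          mismatches++;
      printf ("mismatches: %u\n", mismatches);   /* prints 0 */
      return 0;
    }
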
@@ -11357,6 +11381,47 @@ reversed_comparison (rtx exp, enum machine_mode mode, rtx op0, rtx op1)
return gen_binary (reversed_code, mode, op0, op1);
}
+/* Utility function for record_value_for_reg. Count number of
+ rtxs in X. */
+static int
+count_rtxs (rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+ const char *fmt;
+ int i, ret = 1;
+
+ if (GET_RTX_CLASS (code) == '2'
+ || GET_RTX_CLASS (code) == 'c')
+ {
+ rtx x0 = XEXP (x, 0);
+ rtx x1 = XEXP (x, 1);
+
+ if (x0 == x1)
+ return 1 + 2 * count_rtxs (x0);
+
+ if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
+ || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
+ && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
+ return 2 + 2 * count_rtxs (x0)
+ + count_rtxs (x0 == XEXP (x1, 0)
+ ? XEXP (x1, 1) : XEXP (x1, 0));
+
+ if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
+ || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
+ && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
+ return 2 + 2 * count_rtxs (x1)
+ + count_rtxs (x1 == XEXP (x0, 0)
+ ? XEXP (x0, 1) : XEXP (x0, 0));
+ }
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ ret += count_rtxs (XEXP (x, i));
+
+ return ret;
+}
+
/* Utility function for the following routine. Called when X is part of a value
being stored into reg_last_set_value. Sets reg_last_set_table_tick
for each register mentioned. Similar to mention_regs in cse.c */
@@ -11463,6 +11528,13 @@ record_value_for_reg (rtx reg, rtx insn, rtx value)
&& GET_CODE (XEXP (tem, 0)) == CLOBBER
&& GET_CODE (XEXP (tem, 1)) == CLOBBER)
tem = XEXP (tem, 0);
+ else if (count_occurrences (value, reg, 1) >= 2)
+ {
+ /* If there are two or more occurrences of REG in VALUE,
+ prevent the value from growing too much. */
+ if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
+ tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
+ }
value = replace_rtx (copy_rtx (value), reg, tem);
}
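The new guard exists because each substitution of TEM for REG can multiply the recorded expression's size when REG occurs several times; past a node budget (the MAX_LAST_VALUE_RTL param in the patch) the value is simply clobbered. A sketch of that cap with a hypothetical tree type and threshold:

    #include <stdio.h>

    #define MAX_NODES 100   /* stand-in for MAX_LAST_VALUE_RTL */

    struct node { struct node *left, *right; };

    static int
    count_nodes (const struct node *n)
    {
      if (!n)
        return 0;
      return 1 + count_nodes (n->left) + count_nodes (n->right);
    }

    /* Keep the value only while it stays under the budget.  */
    static const struct node *
    record_value (const struct node *value)
    {
      return count_nodes (value) > MAX_NODES ? NULL : value;
    }

    int
    main (void)
    {
      struct node leaf = { NULL, NULL };
      printf ("%s\n", record_value (&leaf) ? "kept" : "clobbered");
      return 0;
    }
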
diff --git a/contrib/gcc/common.opt b/contrib/gcc/common.opt
index 261c8d299bb3..20d1b75abf26 100644
--- a/contrib/gcc/common.opt
+++ b/contrib/gcc/common.opt
@@ -28,7 +28,7 @@ Display this information
-param
Common Separate
---param <param>=<value> Set paramter <param> to value. See below for a complete list of parameters
+--param <param>=<value> Set parameter <param> to value. See below for a complete list of parameters
-target-help
Common
diff --git a/contrib/gcc/config.gcc b/contrib/gcc/config.gcc
index 09466478a9de..9c5c19340439 100644
--- a/contrib/gcc/config.gcc
+++ b/contrib/gcc/config.gcc
@@ -244,6 +244,7 @@ esac
# machines.
tm_p_file=
cpu_type=`echo ${target} | sed 's/-.*$//'`
+cpu_is_64bit=
case ${target} in
alpha*-*-*)
cpu_type=alpha
@@ -300,6 +301,11 @@ powerpc*-*-*)
cpu_type=rs6000
extra_headers="ppc-asm.h altivec.h spe.h"
need_64bit_hwint=yes
+ case x$with_cpu in
+ xpowerpc64|xdefault64|x6[23]0|x970|xG5|xpower[345]|xrs64a)
+ cpu_is_64bit=yes
+ ;;
+ esac
;;
rs6000*-*-*)
need_64bit_hwint=yes
@@ -680,7 +686,7 @@ arm*-*-ecos-elf)
;;
arm*-*-rtems*)
tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h arm/rtems-elf.h rtems.h"
- tmake_file="arm/t-arm-elf t-rtems"
+ tmake_file="arm/t-arm-elf t-rtems arm/t-rtems"
;;
arm*-*-elf | ep9312-*-elf)
tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h"
@@ -700,12 +706,16 @@ arm*-*-kaos*)
tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/aout.h arm/arm.h kaos.h arm/kaos-arm.h"
tmake_file=arm/t-arm-elf
;;
+avr-*-rtems*)
+ tm_file="avr/avr.h dbxelf.h avr/rtems.h rtems.h"
+ tmake_file="avr/t-avr t-rtems avr/t-rtems"
+ ;;
avr-*-*)
tm_file="avr/avr.h dbxelf.h"
use_fixproto=yes
;;
c4x-*-rtems* | tic4x-*-rtems*)
- tmake_file="c4x/t-c4x t-rtems"
+ tmake_file="c4x/t-c4x t-rtems c4x/t-rtems"
tm_file="c4x/c4x.h c4x/rtems.h rtems.h"
c_target_objs="c4x-c.o"
cxx_target_objs="c4x-c.o"
@@ -1257,12 +1267,12 @@ ia64*-*-freebsd*)
;;
ia64*-*-linux*)
tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h ia64/sysv4.h ia64/linux.h"
- tmake_file="t-slibgcc-elf-ver t-linux ia64/t-ia64 ia64/t-glibc"
+ tmake_file="t-slibgcc-elf-ver t-linux ia64/t-ia64 t-libunwind ia64/t-glibc"
+ if test x$with_system_libunwind != xyes ; then
+ tmake_file="${tmake_file} t-libunwind-elf ia64/t-glibc-libunwind"
+ fi
target_cpu_default="MASK_GNU_AS|MASK_GNU_LD"
extra_parts="crtbegin.o crtend.o crtbeginS.o crtendS.o crtfastmath.o"
- if test x"$use_libunwind_exceptions" = xyes; then
- tmake_file="$tmake_file t-libunwind"
- fi
;;
ia64*-*-hpux*)
tm_file="${tm_file} dbxelf.h elfos.h svr4.h ia64/sysv4.h ia64/hpux.h"
@@ -1678,20 +1688,16 @@ pdp11-*-bsd)
pdp11-*-*)
use_fixproto=yes
;;
-avr-*-*)
- use_fixproto=yes
- ;;
# port not yet contributed
#powerpc-*-openbsd*)
# tmake_file="${tmake_file} rs6000/t-fprules "
# extra_headers=
# ;;
powerpc64-*-linux*)
- tm_file="rs6000/biarch64.h ${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h"
- case x$with_cpu in
- x|xpowerpc64|xdefault64) tm_file="${tm_file} rs6000/default64.h";;
- esac
- tm_file="${tm_file} rs6000/linux64.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h"
+ test x$with_cpu != x || cpu_is_64bit=yes
+ test x$cpu_is_64bit != xyes || tm_file="${tm_file} rs6000/default64.h"
+ tm_file="rs6000/biarch64.h ${tm_file} rs6000/linux64.h"
tmake_file="rs6000/t-fprules t-slibgcc-elf-ver t-linux rs6000/t-ppccomm rs6000/t-linux64"
;;
powerpc64-*-gnu*)
@@ -1765,8 +1771,20 @@ powerpc-*-linux*spe*)
tmake_file="rs6000/t-fprules rs6000/t-ppcos t-slibgcc-elf-ver t-linux rs6000/t-ppccomm"
;;
powerpc-*-linux*)
- tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h rs6000/linux.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h freebsd-spec.h rs6000/sysv4.h"
tmake_file="rs6000/t-fprules rs6000/t-ppcos t-slibgcc-elf-ver t-linux rs6000/t-ppccomm"
+ case ${enable_targets}:${cpu_is_64bit} in
+ *powerpc64* | all:* | *:yes)
+ if test x$cpu_is_64bit = xyes; then
+ tm_file="${tm_file} rs6000/default64.h"
+ fi
+ tm_file="rs6000/biarch64.h ${tm_file} rs6000/linux64.h"
+ tmake_file="$tmake_file rs6000/t-linux64"
+ ;;
+ *)
+ tm_file="${tm_file} rs6000/linux.h"
+ ;;
+ esac
;;
powerpc-*-gnu-gnualtivec*)
tm_file="${cpu_type}/${cpu_type}.h elfos.h svr4.h freebsd-spec.h gnu.h rs6000/sysv4.h rs6000/linux.h rs6000/linuxaltivec.h rs6000/gnu.h"
@@ -2007,7 +2025,7 @@ sparc64-*-openbsd*)
with_cpu=ultrasparc
;;
sparc-*-elf*)
- tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/elf.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/sol2-gld.h sparc/elf.h"
tmake_file="sparc/t-elf sparc/t-crtfm"
extra_parts="crti.o crtn.o crtbegin.o crtend.o"
use_fixproto=yes
@@ -2017,7 +2035,7 @@ sparc-*-linux*) # SPARC's running GNU/Linux, libc6
tmake_file="t-slibgcc-elf-ver t-linux sparc/t-crtfm"
;;
sparc-*-rtems*)
- tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/elf.h sparc/rtemself.h rtems.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/sol2-gld.h sparc/elf.h sparc/rtemself.h rtems.h"
tmake_file="sparc/t-elf sparc/t-crtfm t-rtems"
extra_parts="crti.o crtn.o crtbegin.o crtend.o"
;;
@@ -2106,13 +2124,13 @@ sparclite-*-coff*)
tmake_file=sparc/t-sparclite
;;
sparclite-*-elf*)
- tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/elf.h sparc/liteelf.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/sol2-gld.h sparc/elf.h sparc/liteelf.h"
tmake_file="sparc/t-sparclite sparc/t-crtfm"
extra_parts="crtbegin.o crtend.o"
use_fixproto=yes
;;
sparc86x-*-elf*)
- tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/elf.h sparc/sp86x-elf.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/sol2-gld.h sparc/elf.h sparc/sp86x-elf.h"
tmake_file="sparc/t-sp86x sparc/t-crtfm"
extra_parts="crtbegin.o crtend.o"
use_fixproto=yes
@@ -2122,7 +2140,7 @@ sparc64-*-aout*)
use_fixproto=yes
;;
sparc64-*-elf*)
- tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/sp64-elf.h"
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h sparc/sysv4.h sol2.h sparc/sol2.h sparc/sol2-gld.h sparc/sp64-elf.h"
tmake_file="${tmake_file} sparc/t-crtfm"
extra_parts="crtbegin.o crtend.o"
use_fixproto=yes
@@ -2398,24 +2416,24 @@ if test x$with_cpu = x ; then
esac
fi
- # Similarly for --with-schedule.
- if test x$with_schedule = x; then
- case ${target} in
- hppa1* | parisc1*)
- # Override default PA8000 scheduling model.
- with_schedule=7100LC
- ;;
- esac
- fi
+# Similarly for --with-schedule.
+if test x$with_schedule = x; then
+ case ${target} in
+ hppa1* | parisc1*)
+ # Override default PA8000 scheduling model.
+ with_schedule=7100LC
+ ;;
+ esac
+fi
- # Validate and mark as valid any --with options supported
- # by this target. In order to use a particular --with option
- # you must list it in supported_defaults; validating the value
- # is optional. This case statement should set nothing besides
- # supported_defaults.
+# Validate and mark as valid any --with options supported
+# by this target. In order to use a particular --with option
+# you must list it in supported_defaults; validating the value
+# is optional. This case statement should set nothing besides
+# supported_defaults.
- supported_defaults=
- case "${target}" in
+supported_defaults=
+case "${target}" in
alpha*-*-*)
supported_defaults="cpu tune"
for which in cpu tune; do
@@ -2569,8 +2587,7 @@ fi
eval $with_which=
;;
"" | common \
- | power | power2 | power3 | power4 \
- | powerpc | powerpc64 \
+ | power | power[2345] | powerpc | powerpc64 \
| rios | rios1 | rios2 | rsc | rsc1 | rs64a \
| 401 | 403 | 405 | 405fp | 440 | 440fp | 505 \
| 601 | 602 | 603 | 603e | ec603e | 604 \
@@ -2655,11 +2672,11 @@ fi
;;
esac
;;
- esac
+esac
- # Set some miscellaneous flags for particular targets.
- target_cpu_default2=
- case ${target} in
+# Set some miscellaneous flags for particular targets.
+target_cpu_default2=
+case ${target} in
alpha*-*-*)
if test x$gas = xyes
then
@@ -2771,44 +2788,45 @@ fi
;;
esac
;;
- esac
+esac
- t=
- all_defaults="abi cpu arch tune schedule float mode"
- for option in $all_defaults
- do
- eval "val=\$with_$option"
- if test -n "$val"; then
- case " $supported_defaults " in
- *" $option "*)
- ;;
- *)
- echo "This target does not support --with-$option." 2>&1
- exit 1
- ;;
- esac
+t=
+all_defaults="abi cpu arch tune schedule float mode"
+for option in $all_defaults
+do
+ eval "val=\$with_$option"
+ if test -n "$val"; then
+ case " $supported_defaults " in
+ *" $option "*)
+ ;;
+ *)
+ echo "This target does not support --with-$option." 2>&1
+ exit 1
+ ;;
+ esac
- if test "x$t" = x
- then
- t="{ \"$option\", \"$val\" }"
- else
- t="${t}, { \"$option\", \"$val\" }"
- fi
+ if test "x$t" = x
+ then
+ t="{ \"$option\", \"$val\" }"
+ else
+ t="${t}, { \"$option\", \"$val\" }"
fi
- done
- if test "x$t" = x
- then
- configure_default_options="{ { NULL, NULL} }"
- else
- configure_default_options="{ ${t} }"
fi
+done
+
+if test "x$t" = x
+then
+ configure_default_options="{ { NULL, NULL} }"
+else
+ configure_default_options="{ ${t} }"
+fi
- if test "$target_cpu_default2" != ""
+if test "$target_cpu_default2" != ""
+then
+ if test "$target_cpu_default" != ""
then
- if test "$target_cpu_default" != ""
- then
- target_cpu_default="(${target_cpu_default}|${target_cpu_default2})"
- else
- target_cpu_default=$target_cpu_default2
- fi
+ target_cpu_default="(${target_cpu_default}|${target_cpu_default2})"
+ else
+ target_cpu_default=$target_cpu_default2
fi
+fi
diff --git a/contrib/gcc/config.in b/contrib/gcc/config.in
index f791507cebef..547dda029454 100644
--- a/contrib/gcc/config.in
+++ b/contrib/gcc/config.in
@@ -262,6 +262,9 @@
skip when using the GAS .p2align command. */
#undef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* Define if your assembler supports .nsubspa comdat option. */
+#undef HAVE_GAS_NSUBSPA_COMDAT
+
/* Define 0/1 if your assembler supports marking sections with SHF_MERGE flag.
*/
#undef HAVE_GAS_SHF_MERGE
@@ -319,6 +322,9 @@
a read-write section. */
#undef HAVE_LD_RO_RW_SECTION_MIXING
+/* Define if your linker supports -Bstatic/-Bdynamic option. */
+#undef HAVE_LD_STATIC_DYNAMIC
+
/* Define to 1 if you have the <limits.h> header file. */
#undef HAVE_LIMITS_H
@@ -534,9 +540,6 @@
/* Define if your assembler mis-optimizes .eh_frame data. */
#undef USE_AS_TRADITIONAL_FORMAT
-/* Define if gcc should use -lunwind. */
-#undef USE_LIBUNWIND_EXCEPTIONS
-
/* Define to be the last portion of registry key on windows hosts. */
#undef WIN32_REGISTRY_KEY
@@ -549,9 +552,11 @@
/* Define to `int' if <sys/types.h> doesn't define. */
#undef gid_t
-/* Define as `__inline' if that's what the C compiler calls it, or to nothing
- if it is not supported. */
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef __cplusplus
#undef inline
+#endif
/* Define to `int' if <sys/types.h> does not define. */
#undef pid_t
diff --git a/contrib/gcc/config/alpha/alpha.c b/contrib/gcc/config/alpha/alpha.c
index 0086968afb7a..fbaeabe5049a 100644
--- a/contrib/gcc/config/alpha/alpha.c
+++ b/contrib/gcc/config/alpha/alpha.c
@@ -1947,6 +1947,17 @@ alpha_legitimize_address (rtx x, rtx scratch,
}
}
+/* Primarily this is required for TLS symbols, but given that our move
+ patterns *ought* to be able to handle any symbol at any time, we
+ should never be spilling symbolic operands to the constant pool, ever. */
+
+static bool
+alpha_cannot_force_const_mem (rtx x)
+{
+ enum rtx_code code = GET_CODE (x);
+ return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
+}
+
/* We do not allow indirect calls to be optimized into sibling calls, nor
can we allow a call to a function with a different GP to be optimized
into a sibcall. */
@@ -3186,7 +3197,13 @@ alpha_emit_conditional_branch (enum rtx_code code)
/* If the constant doesn't fit into an immediate, but can
be generated by lda/ldah, we adjust the argument and
compare against zero, so we can use beq/bne directly. */
- else if (GET_CODE (op1) == CONST_INT && (code == EQ || code == NE))
+ /* ??? Don't do this when comparing against symbols, otherwise
+ we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
+ be declared false out of hand (at least for non-weak). */
+ else if (GET_CODE (op1) == CONST_INT
+ && (code == EQ || code == NE)
+ && !(symbolic_operand (op0, VOIDmode)
+ || (GET_CODE (op0) == REG && REG_POINTER (op0))))
{
HOST_WIDE_INT v = INTVAL (op1), n = -v;
@@ -6786,11 +6803,6 @@ alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
break;
imask |= 1UL << regno;
}
-
- /* Glibc likes to use $31 as an unwind stopper for crt0. To
- avoid hackery in unwind-dw2.c, we need to actively store a
- zero in the prologue of _Unwind_RaiseException et al. */
- imask |= 1UL << 31;
}
/* If any register spilled, then spill the return address also. */
@@ -7046,6 +7058,48 @@ set_frame_related_p (void)
#define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
+/* Generates a store with the proper unwind info attached. VALUE is
+ stored at BASE_REG+BASE_OFS. If FRAME_BIAS is non-zero, then BASE_REG
+ contains SP+FRAME_BIAS, and that is the unwind info that should be
+ generated. If FRAME_REG != VALUE, then VALUE is being stored on
+ behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
+
+static void
+emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
+ HOST_WIDE_INT base_ofs, rtx frame_reg)
+{
+ rtx addr, mem, insn;
+
+ addr = plus_constant (base_reg, base_ofs);
+ mem = gen_rtx_MEM (DImode, addr);
+ set_mem_alias_set (mem, alpha_sr_alias_set);
+
+ insn = emit_move_insn (mem, value);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ if (frame_bias || value != frame_reg)
+ {
+ if (frame_bias)
+ {
+ addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
+ mem = gen_rtx_MEM (DImode, addr);
+ }
+
+ REG_NOTES (insn)
+ = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, mem, frame_reg),
+ REG_NOTES (insn));
+ }
+}
+
+static void
+emit_frame_store (unsigned int regno, rtx base_reg,
+ HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
+{
+ rtx reg = gen_rtx_REG (DImode, regno);
+ emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
+}
+
/* Write function prologue. */
/* On vms we have two kinds of functions:
@@ -7075,7 +7129,7 @@ alpha_expand_prologue (void)
HOST_WIDE_INT frame_size;
/* Offset from base reg to register save area. */
HOST_WIDE_INT reg_offset;
- rtx sa_reg, mem;
+ rtx sa_reg;
int i;
sa_size = alpha_sa_size ();
@@ -7225,37 +7279,40 @@ alpha_expand_prologue (void)
if (!TARGET_ABI_UNICOSMK)
{
+ HOST_WIDE_INT sa_bias = 0;
+
/* Cope with very large offsets to the register save area. */
sa_reg = stack_pointer_rtx;
if (reg_offset + sa_size > 0x8000)
{
int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
- HOST_WIDE_INT bias;
+ rtx sa_bias_rtx;
if (low + sa_size <= 0x8000)
- bias = reg_offset - low, reg_offset = low;
+ sa_bias = reg_offset - low, reg_offset = low;
else
- bias = reg_offset, reg_offset = 0;
+ sa_bias = reg_offset, reg_offset = 0;
sa_reg = gen_rtx_REG (DImode, 24);
- FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
- GEN_INT (bias))));
+ sa_bias_rtx = GEN_INT (sa_bias);
+
+ if (add_operand (sa_bias_rtx, DImode))
+ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
+ else
+ {
+ emit_move_insn (sa_reg, sa_bias_rtx);
+ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
+ }
}
/* Save regs in stack order. Beginning with VMS PV. */
if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
- {
- mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
- }
+ emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
/* Save register RA next. */
if (imask & (1UL << REG_RA))
{
- mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
+ emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
imask &= ~(1UL << REG_RA);
reg_offset += 8;
}
@@ -7264,36 +7321,14 @@ alpha_expand_prologue (void)
for (i = 0; i < 31; i++)
if (imask & (1UL << i))
{
- mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
+ emit_frame_store (i, sa_reg, sa_bias, reg_offset);
reg_offset += 8;
}
- /* Store a zero if requested for unwinding. */
- if (imask & (1UL << 31))
- {
- rtx insn, t;
-
- mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- insn = emit_move_insn (mem, const0_rtx);
-
- RTX_FRAME_RELATED_P (insn) = 1;
- t = gen_rtx_REG (Pmode, 31);
- t = gen_rtx_SET (VOIDmode, mem, t);
- t = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, t, REG_NOTES (insn));
- REG_NOTES (insn) = t;
-
- reg_offset += 8;
- }
-
for (i = 0; i < 31; i++)
if (fmask & (1UL << i))
{
- mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
+ emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
reg_offset += 8;
}
}
@@ -7307,19 +7342,13 @@ alpha_expand_prologue (void)
for (i = 9; i < 15; i++)
if (imask & (1UL << i))
{
- mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
- reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
+ emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
reg_offset -= 8;
}
for (i = 2; i < 10; i++)
if (fmask & (1UL << i))
{
- mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
- reg_offset));
- set_mem_alias_set (mem, alpha_sr_alias_set);
- FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
+ emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
reg_offset -= 8;
}
}
@@ -7713,9 +7742,6 @@ alpha_expand_epilogue (void)
reg_offset += 8;
}
- if (imask & (1UL << 31))
- reg_offset += 8;
-
for (i = 0; i < 31; ++i)
if (fmask & (1UL << i))
{
@@ -10215,6 +10241,8 @@ alpha_init_libfuncs (void)
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
@@ -10257,4 +10285,3 @@ struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-alpha.h"
-
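The bias computation in alpha_expand_prologue, low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000, sign-extends the low 16 bits of the offset, yielding the displacement an lda instruction can encode and leaving an ldah-sized remainder. A worked check with assumed offsets:

    #include <stdio.h>

    static long
    low16 (long x)
    {
      return ((x & 0xffff) ^ 0x8000) - 0x8000;   /* sign-extend bits 0-15 */
    }

    int
    main (void)
    {
      long a = 0x12345678;   /* low part positive: 0x5678 */
      long b = 0x1234abcd;   /* low part negative: 0xabcd - 0x10000 */

      printf ("%#lx = %#lx + %ld\n", (unsigned long) a,
              (unsigned long) (a - low16 (a)), low16 (a));
      printf ("%#lx = %#lx + %ld\n", (unsigned long) b,
              (unsigned long) (b - low16 (b)), low16 (b));
      return 0;
    }
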
diff --git a/contrib/gcc/config/alpha/alpha.h b/contrib/gcc/config/alpha/alpha.h
index d59797c103fa..ae3a349f97ff 100644
--- a/contrib/gcc/config/alpha/alpha.h
+++ b/contrib/gcc/config/alpha/alpha.h
@@ -641,6 +641,7 @@ extern const char *alpha_tls_size_string; /* For -mtls-size= */
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
((REGNO) >= 32 && (REGNO) <= 62 \
? (MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode \
+ || (MODE) == SCmode || (MODE) == DCmode \
: 1)
/* Value is 1 if MODE is a supported vector mode. */
@@ -1189,6 +1190,7 @@ do { \
#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 26)
#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (26)
#define DWARF_ALT_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (64)
+#define DWARF_ZERO_REG 31
/* Describe how we implement __builtin_eh_return. */
#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 16 : INVALID_REGNUM)
diff --git a/contrib/gcc/config/alpha/alpha.md b/contrib/gcc/config/alpha/alpha.md
index 998e30055aee..c008e1bed28a 100644
--- a/contrib/gcc/config/alpha/alpha.md
+++ b/contrib/gcc/config/alpha/alpha.md
@@ -77,6 +77,7 @@
(UNSPECV_PLDGP2 11) ; prologue ldgp
(UNSPECV_SET_TP 12)
(UNSPECV_RPCC 13)
+ (UNSPECV_SETJMPR_ER 14) ; builtin_setjmp_receiver fragment
])
;; Where necessary, the suffixes _le and _be are used to distinguish between
@@ -438,9 +439,9 @@
;; and if we split before reload, we will require additional instructions.
(define_insn "*adddi_fp_hack"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r")
- (match_operand:DI 2 "const_int_operand" "n")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r")
+ (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r,r,r")
+ (match_operand:DI 2 "const_int_operand" "K,L,n")))]
"NONSTRICT_REG_OK_FP_BASE_P (operands[1])
&& INTVAL (operands[2]) >= 0
/* This is the largest constant an lda+ldah pair can add, minus
@@ -454,7 +455,10 @@
+ max_reg_num () * UNITS_PER_WORD
+ current_function_pretend_args_size)
- current_function_pretend_args_size))"
- "#")
+ "@
+ lda %0,%2(%1)
+ ldah %0,%h2(%1)
+ #")
;; Don't do this if we are adjusting SP since we don't want to do it
;; in two steps. Don't split FP sources for the reason listed above.
@@ -6897,70 +6901,44 @@
"jmp $31,(%0),0"
[(set_attr "type" "ibr")])
-(define_insn "*builtin_setjmp_receiver_er_sl_1"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF && TARGET_AS_CAN_SUBTRACT_LABELS"
- "lda $27,$LSJ%=-%l0($27)\n$LSJ%=:")
-
-(define_insn "*builtin_setjmp_receiver_er_1"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF"
- "br $27,$LSJ%=\n$LSJ%=:"
- [(set_attr "type" "ibr")])
-
-(define_split
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_EXPLICIT_RELOCS && TARGET_ABI_OSF
- && prev_nonnote_insn (insn) == operands[0]"
- [(const_int 0)]
- "
-{
- emit_note (NOTE_INSN_DELETED);
- DONE;
-}")
-
-(define_insn "*builtin_setjmp_receiver_1"
+(define_expand "builtin_setjmp_receiver"
[(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
"TARGET_ABI_OSF"
- "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)"
- [(set_attr "length" "12")
- (set_attr "type" "multi")])
+ "")
-(define_expand "builtin_setjmp_receiver_er"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)
+(define_insn_and_split "*builtin_setjmp_receiver_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ return "#";
+ else
+ return "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)";
+}
+ "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(unspec_volatile [(match_dup 0)] UNSPECV_SETJMPR_ER)
(set (match_dup 1)
(unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LDGP1))
(set (match_dup 1)
(unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_LDGP2))]
- ""
{
operands[1] = pic_offset_table_rtx;
operands[2] = gen_rtx_REG (Pmode, 27);
operands[3] = GEN_INT (alpha_next_sequence_number++);
-})
+}
+ [(set_attr "length" "12")
+ (set_attr "type" "multi")])
-(define_expand "builtin_setjmp_receiver"
- [(unspec_volatile [(label_ref (match_operand 0 "" ""))] UNSPECV_SETJMPR)]
- "TARGET_ABI_OSF"
-{
- if (TARGET_EXPLICIT_RELOCS)
- {
- emit_insn (gen_builtin_setjmp_receiver_er (operands[0]));
- DONE;
- }
-})
+(define_insn "*builtin_setjmp_receiver_er_sl_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR_ER)]
+ "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS && TARGET_AS_CAN_SUBTRACT_LABELS"
+ "lda $27,$LSJ%=-%l0($27)\n$LSJ%=:")
-(define_expand "exception_receiver_er"
- [(set (match_dup 0)
- (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1))
- (set (match_dup 0)
- (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))]
- ""
-{
- operands[0] = pic_offset_table_rtx;
- operands[1] = gen_rtx_REG (Pmode, 26);
- operands[2] = GEN_INT (alpha_next_sequence_number++);
-})
+(define_insn "*builtin_setjmp_receiver_er_1"
+ [(unspec_volatile [(match_operand 0 "" "")] UNSPECV_SETJMPR_ER)]
+ "TARGET_ABI_OSF && TARGET_EXPLICIT_RELOCS"
+ "br $27,$LSJ%=\n$LSJ%=:"
+ [(set_attr "type" "ibr")])
(define_expand "exception_receiver"
[(unspec_volatile [(match_dup 0)] UNSPECV_EHR)]
@@ -6968,28 +6946,38 @@
{
if (TARGET_LD_BUGGY_LDGP)
operands[0] = alpha_gp_save_rtx ();
- else if (TARGET_EXPLICIT_RELOCS)
- {
- emit_insn (gen_exception_receiver_er ());
- DONE;
- }
else
operands[0] = const0_rtx;
})
-(define_insn "*exception_receiver_1"
- [(unspec_volatile [(const_int 0)] UNSPECV_EHR)]
- "! TARGET_LD_BUGGY_LDGP"
- "ldgp $29,0($26)"
- [(set_attr "length" "8")
- (set_attr "type" "multi")])
-
(define_insn "*exception_receiver_2"
[(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_EHR)]
- "TARGET_LD_BUGGY_LDGP"
+ "TARGET_ABI_OSF && TARGET_LD_BUGGY_LDGP"
"ldq $29,%0"
[(set_attr "type" "ild")])
+(define_insn_and_split "*exception_receiver_1"
+ [(unspec_volatile [(const_int 0)] UNSPECV_EHR)]
+ "TARGET_ABI_OSF"
+{
+ if (TARGET_EXPLICIT_RELOCS)
+ return "ldah $29,0($26)\t\t!gpdisp!%*\;lda $29,0($29)\t\t!gpdisp!%*";
+ else
+ return "ldgp $29,0($26)";
+}
+ "&& TARGET_EXPLICIT_RELOCS && reload_completed"
+ [(set (match_dup 0)
+ (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1))
+ (set (match_dup 0)
+ (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))]
+{
+ operands[0] = pic_offset_table_rtx;
+ operands[1] = gen_rtx_REG (Pmode, 26);
+ operands[2] = GEN_INT (alpha_next_sequence_number++);
+}
+ [(set_attr "length" "8")
+ (set_attr "type" "multi")])
+
(define_expand "nonlocal_goto_receiver"
[(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)
(set (reg:DI 27) (mem:DI (reg:DI 29)))
diff --git a/contrib/gcc/config/alpha/qrnnd.asm b/contrib/gcc/config/alpha/qrnnd.asm
index d6373ec1bff9..da9c4bc83388 100644
--- a/contrib/gcc/config/alpha/qrnnd.asm
+++ b/contrib/gcc/config/alpha/qrnnd.asm
@@ -26,6 +26,10 @@
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
+#ifdef __ELF__
+.section .note.GNU-stack,""
+#endif
+
.set noreorder
.set noat
diff --git a/contrib/gcc/config/alpha/t-osf4 b/contrib/gcc/config/alpha/t-osf4
index fe747a3d521d..58ce6c2d8d6b 100644
--- a/contrib/gcc/config/alpha/t-osf4
+++ b/contrib/gcc/config/alpha/t-osf4
@@ -16,8 +16,12 @@ SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-hidden_symbol,pthread\* -Wl,-hidden_symbol,__pthread\* \
-Wl,-hidden_symbol,sched_get_\* -Wl,-hidden_symbol,sched_yield \
-Wl,-msym -Wl,-set_version,gcc.1 -Wl,-soname,$(SHLIB_SONAME) \
- -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) -lc && \
rm -f $(SHLIB_SONAME) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
diff --git a/contrib/gcc/config/arm/arm-protos.h b/contrib/gcc/config/arm/arm-protos.h
index 471254efe4e1..2da99b82d677 100644
--- a/contrib/gcc/config/arm/arm-protos.h
+++ b/contrib/gcc/config/arm/arm-protos.h
@@ -1,5 +1,6 @@
/* Prototypes for exported functions defined in arm.c and pe.c
- Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2005
+ Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rearnsha@arm.com)
Minor hacks by Nick Clifton (nickc@cygnus.com)
@@ -138,6 +139,7 @@ extern int arm_debugger_arg_offset (int, rtx);
extern int arm_is_longcall_p (rtx, int, int);
extern int arm_emit_vector_const (FILE *, rtx);
extern const char * arm_output_load_gr (rtx *);
+extern int arm_eliminable_register (rtx);
#if defined TREE_CODE
extern rtx arm_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
diff --git a/contrib/gcc/config/arm/arm.c b/contrib/gcc/config/arm/arm.c
index 91e4486d0bbb..95188944be3d 100644
--- a/contrib/gcc/config/arm/arm.c
+++ b/contrib/gcc/config/arm/arm.c
@@ -1,6 +1,6 @@
/* Output routines for GCC for ARM.
Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
- 2002, 2003, 2004 Free Software Foundation, Inc.
+ 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
More major hacks by Richard Earnshaw (rearnsha@arm.com).
@@ -4056,6 +4056,16 @@ cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
&& INTVAL (op) < 64);
}
+/* Return true if X is a register that will be eliminated later on. */
+int
+arm_eliminable_register (rtx x)
+{
+ return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
+ || REGNO (x) == ARG_POINTER_REGNUM
+ || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
+ && REGNO (x) <= LAST_VIRTUAL_REGISTER));
+}
+
/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
Use by the Cirrus Maverick code which has to workaround
a hardware bug triggered by such instructions. */
@@ -4569,33 +4579,42 @@ adjacent_mem_locations (rtx a, rtx b)
|| (GET_CODE (XEXP (b, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
{
- int val0 = 0, val1 = 0;
- int reg0, reg1;
-
+ HOST_WIDE_INT val0 = 0, val1 = 0;
+ rtx reg0, reg1;
+ int val_diff;
+
if (GET_CODE (XEXP (a, 0)) == PLUS)
{
- reg0 = REGNO (XEXP (XEXP (a, 0), 0));
+ reg0 = XEXP (XEXP (a, 0), 0);
val0 = INTVAL (XEXP (XEXP (a, 0), 1));
}
else
- reg0 = REGNO (XEXP (a, 0));
+ reg0 = XEXP (a, 0);
if (GET_CODE (XEXP (b, 0)) == PLUS)
{
- reg1 = REGNO (XEXP (XEXP (b, 0), 0));
+ reg1 = XEXP (XEXP (b, 0), 0);
val1 = INTVAL (XEXP (XEXP (b, 0), 1));
}
else
- reg1 = REGNO (XEXP (b, 0));
+ reg1 = XEXP (b, 0);
/* Don't accept any offset that will require multiple
instructions to handle, since this would cause the
arith_adjacentmem pattern to output an overlong sequence. */
if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
return 0;
-
- return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
+
+ /* Don't allow an eliminable register: register elimination can make
+ the offset too large. */
+ if (arm_eliminable_register (reg0))
+ return 0;
+
+ val_diff = val1 - val0;
+ return ((REGNO (reg0) == REGNO (reg1))
+ && (val_diff == 4 || val_diff == -4));
}
+
return 0;
}
@@ -7301,7 +7320,6 @@ output_call_mem (rtx *operands)
return "";
}
-
/* Output a move from arm registers to an fpa registers.
OPERANDS[0] is an fpa register.
OPERANDS[1] is the first register of an arm register pair. */
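The rewritten adjacency test above reduces to: same base register, constant offsets exactly one word (4 bytes) apart. A standalone sketch:

    #include <stdio.h>

    static int
    adjacent (int reg0, long val0, int reg1, long val1)
    {
      long val_diff = val1 - val0;

      return reg0 == reg1 && (val_diff == 4 || val_diff == -4);
    }

    int
    main (void)
    {
      printf ("%d %d %d\n",
              adjacent (3, 8, 3, 12),    /* 1: same base, +4 */
              adjacent (3, 8, 3, 4),     /* 1: same base, -4 */
              adjacent (3, 8, 5, 12));   /* 0: different base */
      return 0;
    }
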
diff --git a/contrib/gcc/config/arm/arm.h b/contrib/gcc/config/arm/arm.h
index 3a13d919243b..94d8b943b410 100644
--- a/contrib/gcc/config/arm/arm.h
+++ b/contrib/gcc/config/arm/arm.h
@@ -1396,7 +1396,7 @@ enum reg_class
: NO_REGS)
#define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
- ((CLASS) != LO_REGS \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
? ((true_regnum (X) == -1 ? LO_REGS \
: (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
: NO_REGS)) \
diff --git a/contrib/gcc/config/arm/arm.md b/contrib/gcc/config/arm/arm.md
index 9f10d1063dfa..57926ba6b9bc 100644
--- a/contrib/gcc/config/arm/arm.md
+++ b/contrib/gcc/config/arm/arm.md
@@ -5960,22 +5960,24 @@
[(set (pc)
(if_then_else
(match_operator 5 "equality_operator"
- [(and:SI (not:SI (match_operand:SI 3 "s_register_operand" "l,l,l,l"))
- (match_operand:SI 2 "s_register_operand" "0,1,1,1"))
+ [(and:SI (not:SI (match_operand:SI 3 "s_register_operand" "l,l,l,l,l"))
+ (match_operand:SI 2 "s_register_operand" "0,1,1,1,1"))
(const_int 0)])
(label_ref (match_operand 4 "" ""))
(pc)))
- (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=l,*?h,*?m,*?m")
+ (set (match_operand:SI 0 "thumb_cbrch_target_operand" "=!l,l,*?h,*?m,*?m")
(and:SI (not:SI (match_dup 3)) (match_dup 2)))
- (clobber (match_scratch:SI 1 "=X,l,&l,&l"))]
+ (clobber (match_scratch:SI 1 "=X,l,l,&l,&l"))]
"TARGET_THUMB"
"*
{
if (which_alternative == 0)
output_asm_insn (\"bic\\t%0, %3\", operands);
- else if (which_alternative == 1)
+ else if (which_alternative <= 2)
{
output_asm_insn (\"bic\\t%1, %3\", operands);
+ /* It's ok if OP0 is a lo-reg, even though the mov will set the
+ conditions again, since we're only testing for equality. */
output_asm_insn (\"mov\\t%0, %1\", operands);
}
else
@@ -6234,10 +6236,10 @@
case 1:
output_asm_insn (\"cmn\t%1, %2\", operands);
break;
- case 3:
+ case 2:
output_asm_insn (\"add\t%0, %1, %2\", operands);
break;
- case 4:
+ case 3:
output_asm_insn (\"add\t%0, %0, %2\", operands);
break;
}
@@ -7128,8 +7130,8 @@
(const_string "no")))
(set (attr "length")
(if_then_else
- (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
- (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2048)))
(const_int 2)
(const_int 4)))]
)
diff --git a/contrib/gcc/config/arm/t-netbsd b/contrib/gcc/config/arm/t-netbsd
index 77e622716f25..533fab947a1c 100644
--- a/contrib/gcc/config/arm/t-netbsd
+++ b/contrib/gcc/config/arm/t-netbsd
@@ -11,8 +11,12 @@ SHLIB_OBJS = @shlib_objs@
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-soname,$(SHLIB_SONAME) \
- -o $(SHLIB_NAME) @multilib_flags@ $(SHLIB_OBJS) -lc && \
+ -o $(SHLIB_NAME).tmp @multilib_flags@ $(SHLIB_OBJS) -lc && \
rm -f $(SHLIB_SONAME) && \
+ if [ -f $(SHLIB_NAME) ]; then \
+ mv -f $(SHLIB_NAME) $(SHLIB_NAME).backup; \
+ else true; fi && \
+ mv $(SHLIB_NAME).tmp $(SHLIB_NAME) && \
$(LN_S) $(SHLIB_NAME) $(SHLIB_SONAME)
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
diff --git a/contrib/gcc/config/arm/t-rtems b/contrib/gcc/config/arm/t-rtems
new file mode 100644
index 000000000000..52d14bab08f6
--- /dev/null
+++ b/contrib/gcc/config/arm/t-rtems
@@ -0,0 +1,10 @@
+# Custom rtems multilibs
+
+MULTILIB_OPTIONS = marm/mthumb
+MULTILIB_DIRNAMES = arm thumb
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = marm=mno-thumb
+
+MULTILIB_OPTIONS += msoft-float/mhard-float
+MULTILIB_DIRNAMES += soft fpu
+MULTILIB_EXCEPTIONS += *mthumb/*mhard-float*
diff --git a/contrib/gcc/config/darwin-protos.h b/contrib/gcc/config/darwin-protos.h
index 41bad646ce1a..3305112065e6 100644
--- a/contrib/gcc/config/darwin-protos.h
+++ b/contrib/gcc/config/darwin-protos.h
@@ -65,8 +65,6 @@ extern const char *darwin_strip_name_encoding (const char *);
extern void machopic_finish (FILE *);
-extern void machopic_output_possible_stub_label (FILE *, const char*);
-
extern void darwin_exception_section (void);
extern void darwin_eh_frame_section (void);
extern void machopic_select_section (tree, int, unsigned HOST_WIDE_INT);
diff --git a/contrib/gcc/config/darwin.c b/contrib/gcc/config/darwin.c
index 8005ecd1e1d6..c946e591a383 100644
--- a/contrib/gcc/config/darwin.c
+++ b/contrib/gcc/config/darwin.c
@@ -900,10 +900,6 @@ machopic_finish (FILE *asm_out_file)
if (! TREE_USED (temp))
continue;
- /* If the symbol is actually defined, we don't need a stub. */
- if (sym_name[0] == '!' && sym_name[1] == 'T')
- continue;
-
sym_name = darwin_strip_name_encoding (sym_name);
sym = alloca (strlen (sym_name) + 2);
@@ -1096,37 +1092,6 @@ update_non_lazy_ptrs (const char *name)
}
}
-/* Function NAME is being defined, and its label has just been output.
- If there's already a reference to a stub for this function, we can
- just emit the stub label now and we don't bother emitting the stub later. */
-
-void
-machopic_output_possible_stub_label (FILE *file, const char *name)
-{
- tree temp;
-
- /* Ensure we're looking at a section-encoded name. */
- if (name[0] != '!' || (name[1] != 't' && name[1] != 'T'))
- return;
-
- for (temp = machopic_stubs;
- temp != NULL_TREE;
- temp = TREE_CHAIN (temp))
- {
- const char *sym_name;
-
- sym_name = IDENTIFIER_POINTER (TREE_VALUE (temp));
- if (sym_name[0] == '!' && (sym_name[1] == 'T' || sym_name[1] == 't')
- && ! strcmp (name+2, sym_name+2))
- {
- ASM_OUTPUT_LABEL (file, IDENTIFIER_POINTER (TREE_PURPOSE (temp)));
- /* Avoid generating a stub for this. */
- TREE_USED (temp) = 0;
- break;
- }
- }
-}
-
/* Scan the list of stubs and update any recorded names whose
stripped name matches the argument. */
diff --git a/contrib/gcc/config/darwin.h b/contrib/gcc/config/darwin.h
index 045091aff5e0..c6ff93f56194 100644
--- a/contrib/gcc/config/darwin.h
+++ b/contrib/gcc/config/darwin.h
@@ -99,7 +99,13 @@ Boston, MA 02111-1307, USA. */
Note that an option name with a prefix that matches another option
name, that also takes an argument, needs to be modified so the
prefix is different, otherwise a '*' after the shorter option will
- match with the longer one. */
+ match with the longer one.
+
+ The SUBTARGET_OPTION_TRANSLATE_TABLE macro, which _must_ be defined
+ in gcc/config/{i386,rs6000}/darwin.h, should contain any additional
+ command-line option translations specific to the particular target
+ architecture. */
+
#define TARGET_OPTION_TRANSLATE_TABLE \
{ "-all_load", "-Zall_load" }, \
{ "-allowable_client", "-Zallowable_client" }, \
@@ -126,7 +132,8 @@ Boston, MA 02111-1307, USA. */
{ "-multi_module", "-Zmulti_module" }, \
{ "-static", "-static -Wa,-static" }, \
{ "-single_module", "-Zsingle_module" }, \
- { "-unexported_symbols_list", "-Zunexported_symbols_list" }
+ { "-unexported_symbols_list", "-Zunexported_symbols_list" }, \
+ SUBTARGET_OPTION_TRANSLATE_TABLE
/* These compiler options take n arguments. */
@@ -390,9 +397,6 @@ do { text_section (); \
|| DECL_INITIAL (DECL)) \
(* targetm.encode_section_info) (DECL, DECL_RTL (DECL), false); \
ASM_OUTPUT_LABEL (FILE, xname); \
- /* Avoid generating stubs for functions we've just defined by \
- outputting any required stub name label now. */ \
- machopic_output_possible_stub_label (FILE, xname); \
} while (0)
#define ASM_DECLARE_CONSTANT_NAME(FILE, NAME, EXP, SIZE) \
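The translate-table mechanism above is additive: the generic list now ends by splicing in SUBTARGET_OPTION_TRANSLATE_TABLE, so each architecture's darwin.h must supply a definition, empty or otherwise. A minimal sketch of a non-empty subtarget definition (the option names here are invented for illustration):

    /* In a hypothetical <arch>/darwin.h: append one more translation,
       using the same { "driver spelling", "internal spelling" } shape
       as the generic entries above.  */
    #define SUBTARGET_OPTION_TRANSLATE_TABLE \
      { "-mexample_flag", "-Zexample_flag" }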
diff --git a/contrib/gcc/config/freebsd-spec.h b/contrib/gcc/config/freebsd-spec.h
index e4459ba6aa6c..a98b0e5108c9 100644
--- a/contrib/gcc/config/freebsd-spec.h
+++ b/contrib/gcc/config/freebsd-spec.h
@@ -107,12 +107,12 @@ Boston, MA 02111-1307, USA. */
500016, select the appropriate libc, depending on whether we're
doing profiling or need threads support. At __FreeBSD_version
500016 and later, when threads support is requested include both
- -lc and -lc_r instead of only -lc_r. To make matters interesting,
- we can't actually use __FreeBSD_version provided by <osreldate.h>
- directly since it breaks cross-compiling. As a final twist, make
- it a hard error if -pthread is provided on the command line and gcc
- was configured with --disable-threads (this will help avoid bug
- reports from users complaining about threading when they
+ -lc and the threading lib instead of only -lc_r. To make matters
+ interesting, we can't actually use __FreeBSD_version provided by
+ <osreldate.h> directly since it breaks cross-compiling. As a final
+ twist, make it a hard error if -pthread is provided on the command
+ line and gcc was configured with --disable-threads (this will help
+ avoid bug reports from users complaining about threading when they
misconfigured the gcc bootstrap but are later consulting FreeBSD
manual pages that refer to the mythical -pthread option). */
@@ -129,13 +129,7 @@ is built with the --enable-threads configure-time option.} \
%{pg: -lc_p} \
}"
#else
-#if FBSD_MAJOR >= 5
-#define FBSD_LIB_SPEC " \
- %{!shared: \
- %{!pg: %{pthread:-lc_r} -lc} \
- %{pg: %{pthread:-lc_r_p} -lc_p} \
- }"
-#else
+#if FBSD_MAJOR < 5
#define FBSD_LIB_SPEC " \
%{!shared: \
%{!pg: \
@@ -145,6 +139,12 @@ is built with the --enable-threads configure-time option.} \
%{!pthread:-lc_p} \
%{pthread:-lc_r_p}} \
}"
+#else
+#define FBSD_LIB_SPEC " \
+ %{!shared: \
+ %{!pg: %{pthread:-lpthread} -lc} \
+ %{pg: %{pthread:-lpthread_p} -lc_p} \
+ }"
#endif
#endif
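Net effect of the reordered FBSD_LIB_SPEC at __FreeBSD_version 500016 and later (FBSD_MAJOR >= 5): -pthread now adds libpthread in front of libc instead of substituting libc_r. Illustrative expansions for a non-shared link:

    cc foo.o                ->  ... -lc
    cc -pthread foo.o       ->  ... -lpthread -lc
    cc -pthread -pg foo.o   ->  ... -lpthread_p -lc_p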
diff --git a/contrib/gcc/config/i386/cygwin1.c b/contrib/gcc/config/i386/cygwin1.c
index 2cab96c195c6..88c44fc9fbf6 100644
--- a/contrib/gcc/config/i386/cygwin1.c
+++ b/contrib/gcc/config/i386/cygwin1.c
@@ -30,13 +30,13 @@ mingw_scan (int argc ATTRIBUTE_UNUSED,
const char *const *argv,
char **spec_machine)
{
- putenv ("GCC_CYGWIN_MINGW=0");
+ putenv (xstrdup ("GCC_CYGWIN_MINGW=0"));
while (*++argv)
if (strcmp (*argv, "-mno-win32") == 0)
- putenv ("GCC_CYGWIN_WIN32=0");
+ putenv (xstrdup ("GCC_CYGWIN_WIN32=0"));
else if (strcmp (*argv, "-mwin32") == 0)
- putenv ("GCC_CYGWIN_WIN32=1");
+ putenv (xstrdup ("GCC_CYGWIN_WIN32=1"));
else if (strcmp (*argv, "-mno-cygwin") == 0)
{
char *p = strstr (*spec_machine, "-cygwin");
@@ -48,7 +48,7 @@ mingw_scan (int argc ATTRIBUTE_UNUSED,
strcpy (s + len, "-mingw32");
*spec_machine = s;
}
- putenv ("GCC_CYGWIN_MINGW=1");
+ putenv (xstrdup ("GCC_CYGWIN_MINGW=1"));
}
return;
}
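The xstrdup() wrappers are needed because putenv() keeps the pointer it is handed as part of the environment rather than copying the string, so the storage must outlive the call. A self-contained sketch of the pitfall (hypothetical variable name, not GCC code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void set_flag (int on)
    {
      char buf[24];
      snprintf (buf, sizeof buf, "GCC_FLAG=%d", on);
      /* putenv (buf) would be wrong: the environment would point at
         stack memory that dies when this function returns.  */
      putenv (strdup (buf));        /* safe: the heap copy stays live */
    }

    int main (void)
    {
      set_flag (1);
      printf ("%s\n", getenv ("GCC_FLAG"));   /* prints "1" */
      return 0;
    }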
diff --git a/contrib/gcc/config/i386/darwin.h b/contrib/gcc/config/i386/darwin.h
index fd501bf6a8b6..8246b9eb6744 100644
--- a/contrib/gcc/config/i386/darwin.h
+++ b/contrib/gcc/config/i386/darwin.h
@@ -41,6 +41,10 @@ Boston, MA 02111-1307, USA. */
#undef CC1_SPEC
#define CC1_SPEC "%{!static:-fPIC}"
+/* Use the following macro for any Darwin/x86-specific command-line option
+ translation. */
+#define SUBTARGET_OPTION_TRANSLATE_TABLE
+
#define ASM_SPEC "-arch i386 \
%{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \
%{!Zforce_cpusubtype_ALL:%{mmmx:-force_cpusubtype_ALL}\
diff --git a/contrib/gcc/config/i386/emmintrin.h b/contrib/gcc/config/i386/emmintrin.h
index abe450a8f334..286906324a56 100644
--- a/contrib/gcc/config/i386/emmintrin.h
+++ b/contrib/gcc/config/i386/emmintrin.h
@@ -34,7 +34,7 @@
#include <xmmintrin.h>
/* SSE2 */
-typedef int __v2df __attribute__ ((mode (V2DF)));
+typedef double __v2df __attribute__ ((mode (V2DF)));
typedef int __v2di __attribute__ ((mode (V2DI)));
typedef int __v4si __attribute__ ((mode (V4SI)));
typedef int __v8hi __attribute__ ((mode (V8HI)));
diff --git a/contrib/gcc/config/i386/freebsd.h b/contrib/gcc/config/i386/freebsd.h
index 9e538e916a27..4fc7a9ae4786 100644
--- a/contrib/gcc/config/i386/freebsd.h
+++ b/contrib/gcc/config/i386/freebsd.h
@@ -138,12 +138,5 @@ Boston, MA 02111-1307, USA. */
/* FreeBSD sets the rounding precision of the FPU to 53 bits. Let the
compiler get the contents of <float.h> and std::numeric_limits correct. */
-#define SUBTARGET_OVERRIDE_OPTIONS \
- do { \
- if (!TARGET_64BIT) { \
- REAL_MODE_FORMAT (XFmode) \
- = &ieee_extended_intel_96_round_53_format; \
- REAL_MODE_FORMAT (TFmode) \
- = &ieee_extended_intel_96_round_53_format; \
- } \
- } while (0)
+#undef TARGET_96_ROUND_53_LONG_DOUBLE
+#define TARGET_96_ROUND_53_LONG_DOUBLE (!TARGET_64BIT)
diff --git a/contrib/gcc/config/i386/gthr-win32.c b/contrib/gcc/config/i386/gthr-win32.c
index 4e2b282251d8..c53369bca50c 100644
--- a/contrib/gcc/config/i386/gthr-win32.c
+++ b/contrib/gcc/config/i386/gthr-win32.c
@@ -31,11 +31,13 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
the executable file might be covered by the GNU General Public License. */
+#include <windows.h>
#ifndef __GTHREAD_HIDE_WIN32API
# define __GTHREAD_HIDE_WIN32API 1
#endif
+#undef __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
+#define __GTHREAD_I486_INLINE_LOCK_PRIMITIVES
#include <gthr-win32.h>
-#include <windows.h>
/* Windows32 threads specific definitions. The windows32 threading model
does not map well into pthread-inspired gcc's threading model, and so
@@ -61,10 +63,9 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
This may cause incorrect error return due to truncation of values on
hw where sizeof (DWORD) > sizeof (int).
- 3. We might consider using Critical Sections instead of Windows32
- mutexes for better performance, but emulating __gthread_mutex_trylock
- interface becomes more complicated (Win9x does not support
- TryEnterCriticalSection, while NT does).
+ 3. We are currently using a special mutex instead of the Critical
+ Sections, since Win9x does not support TryEnterCriticalSection
+ (while NT does).
The basic framework should work well enough. In the long term, GCC
needs to use Structured Exception Handling on Windows32. */
@@ -145,23 +146,29 @@ __gthr_win32_setspecific (__gthread_key_t key, const void *ptr)
void
__gthr_win32_mutex_init_function (__gthread_mutex_t *mutex)
{
- /* Create unnamed mutex with default security attr and no initial owner. */
- *mutex = CreateMutex (NULL, 0, NULL);
+ mutex->counter = -1;
+ mutex->sema = CreateSemaphore (NULL, 0, 65535, NULL);
}
int
__gthr_win32_mutex_lock (__gthread_mutex_t *mutex)
{
- if (WaitForSingleObject (*mutex, INFINITE) == WAIT_OBJECT_0)
+ if (InterlockedIncrement (&mutex->counter) == 0 ||
+ WaitForSingleObject (mutex->sema, INFINITE) == WAIT_OBJECT_0)
return 0;
else
- return 1;
+ {
+ /* WaitForSingleObject returns WAIT_FAILED, and we can only do
+ some best-effort cleanup here. */
+ InterlockedDecrement (&mutex->counter);
+ return 1;
+ }
}
int
__gthr_win32_mutex_trylock (__gthread_mutex_t *mutex)
{
- if (WaitForSingleObject (*mutex, 0) == WAIT_OBJECT_0)
+ if (__GTHR_W32_InterlockedCompareExchange (&mutex->counter, 0, -1) < 0)
return 0;
else
return 1;
@@ -170,5 +177,8 @@ __gthr_win32_mutex_trylock (__gthread_mutex_t *mutex)
int
__gthr_win32_mutex_unlock (__gthread_mutex_t *mutex)
{
- return (ReleaseMutex (*mutex) != 0) ? 0 : 1;
+ if (InterlockedDecrement (&mutex->counter) >= 0)
+ return ReleaseSemaphore (mutex->sema, 1, NULL) ? 0 : 1;
+ else
+ return 0;
}
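For reference, the protocol behind the rewritten mutex (a simplified restatement of the hunk above, error paths elided): counter starts at -1 when unlocked, the first locker takes it to 0 without entering the kernel, and the semaphore parks only contended waiters.

    /* lock */
    if (InterlockedIncrement (&m->counter) == 0)
      ;                                          /* fast path: owned */
    else
      WaitForSingleObject (m->sema, INFINITE);   /* contended: sleep */

    /* trylock: succeed only if the mutex was idle (counter == -1) */
    owned = (InterlockedCompareExchange (&m->counter, 0, -1) == -1);

    /* unlock: a nonnegative result means someone is parked; wake one */
    if (InterlockedDecrement (&m->counter) >= 0)
      ReleaseSemaphore (m->sema, 1, NULL);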
diff --git a/contrib/gcc/config/i386/i386-modes.def b/contrib/gcc/config/i386/i386-modes.def
index 89c83c441872..36c6d42e2d71 100644
--- a/contrib/gcc/config/i386/i386-modes.def
+++ b/contrib/gcc/config/i386/i386-modes.def
@@ -29,6 +29,8 @@ Boston, MA 02111-1307, USA. */
FLOAT_MODE (XF, 12, ieee_extended_intel_96_format);
ADJUST_FLOAT_FORMAT (XF, (TARGET_128BIT_LONG_DOUBLE
? &ieee_extended_intel_128_format
+ : TARGET_96_ROUND_53_LONG_DOUBLE
+ ? &ieee_extended_intel_96_round_53_format
: &ieee_extended_intel_96_format));
ADJUST_BYTESIZE (XF, TARGET_128BIT_LONG_DOUBLE ? 16 : 12);
ADJUST_ALIGNMENT (XF, TARGET_128BIT_LONG_DOUBLE ? 16 : 4);
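The nested conditional reads most easily as a three-way choice, with TARGET_96_ROUND_53_LONG_DOUBLE being the new hook that i386/freebsd.h (earlier in this diff) defines as !TARGET_64BIT. Equivalent logic, restated as a sketch:

    if (TARGET_128BIT_LONG_DOUBLE)
      fmt = &ieee_extended_intel_128_format;
    else if (TARGET_96_ROUND_53_LONG_DOUBLE)     /* e.g. 32-bit FreeBSD */
      fmt = &ieee_extended_intel_96_round_53_format;
    else
      fmt = &ieee_extended_intel_96_format;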
diff --git a/contrib/gcc/config/i386/i386-protos.h b/contrib/gcc/config/i386/i386-protos.h
index cc1bb813afe5..ea0e8f651582 100644
--- a/contrib/gcc/config/i386/i386-protos.h
+++ b/contrib/gcc/config/i386/i386-protos.h
@@ -93,6 +93,7 @@ extern int memory_displacement_operand (rtx, enum machine_mode);
extern int cmpsi_operand (rtx, enum machine_mode);
extern int long_memory_operand (rtx, enum machine_mode);
extern int aligned_operand (rtx, enum machine_mode);
+extern int compare_operator (rtx, enum machine_mode);
extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
extern int ix86_expand_movstr (rtx, rtx, rtx, rtx);
diff --git a/contrib/gcc/config/i386/i386.c b/contrib/gcc/config/i386/i386.c
index c2f59c9cb15b..9504583b85c0 100644
--- a/contrib/gcc/config/i386/i386.c
+++ b/contrib/gcc/config/i386/i386.c
@@ -522,7 +522,14 @@ const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
-const int x86_inter_unit_moves = ~(m_ATHLON_K8);
+
+/* ??? HACK! The following is a lie. SSE can hold e.g. SImode, and
+ indeed *must* be able to hold SImode so that SSE2 shifts are able
+ to work right. But this can result in some mighty surprising
+ register allocation when building kernels. Turning this off should
+ make us less likely to suddenly select an SSE register. */
+const int x86_inter_unit_moves = 0; /* ~(m_ATHLON_K8) */
+
const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_PPRO;
/* In case the average insn count for single function invocation is
@@ -2536,6 +2543,34 @@ function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
return;
}
+/* A subroutine of function_arg. We want to pass a parameter whose nominal
+ type is MODE in REGNO. We try to minimize ABI variation, so MODE may not
+ actually be valid for REGNO with the current ISA. In this case, ALT_MODE
+ is used instead. It must be the same size as MODE, and must be known to
+ be valid for REGNO. Finally, ORIG_MODE is the original mode of the
+ parameter, as seen by the type system. This may be different from MODE
+ when we're mucking with things to minimize ABI variation.
+
+ Returns a REG or a PARALLEL as appropriate. */
+
+static rtx
+gen_reg_or_parallel (enum machine_mode mode, enum machine_mode alt_mode,
+ enum machine_mode orig_mode, unsigned int regno)
+{
+ rtx tmp;
+
+ if (HARD_REGNO_MODE_OK (regno, mode))
+ tmp = gen_rtx_REG (mode, regno);
+ else
+ {
+ tmp = gen_rtx_REG (alt_mode, regno);
+ tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
+ tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
+ }
+
+ return tmp;
+}
+
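When MODE is not valid for REGNO, the single-element PARALLEL says: pass the argument in ALT_MODE, placed at byte offset 0. As an illustrative shape (register number invented), a V2SF argument routed through an MMX register would come out as:

    (parallel:V2SF
      [(expr_list (reg:DI 29 mm0)
                  (const_int 0))])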
/* Define where to put the arguments to a function.
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
@@ -2550,12 +2585,11 @@ function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
(otherwise it is an extra parameter matching an ellipsis). */
rtx
-function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
- enum machine_mode mode, /* current arg mode */
- tree type, /* type of the argument or 0 if lib support */
- int named) /* != 0 for normal args, == 0 for ... args */
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
+ tree type, int named)
{
- rtx ret = NULL_RTX;
+ enum machine_mode mode = orig_mode;
+ rtx ret = NULL_RTX;
int bytes =
(mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
@@ -2628,7 +2662,8 @@ function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
"changes the ABI");
}
if (cum->sse_nregs)
- ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
+ ret = gen_reg_or_parallel (mode, TImode, orig_mode,
+ cum->sse_regno + FIRST_SSE_REG);
}
break;
case V8QImode:
@@ -2644,7 +2679,8 @@ function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
"changes the ABI");
}
if (cum->mmx_nregs)
- ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
+ ret = gen_reg_or_parallel (mode, DImode, orig_mode,
+ cum->mmx_regno + FIRST_MMX_REG);
}
break;
}
@@ -4319,6 +4355,12 @@ aligned_operand (rtx op, enum machine_mode mode)
/* Didn't find one -- this must be an aligned address. */
return 1;
}
+
+int
+compare_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return GET_CODE (op) == COMPARE;
+}
/* Initialize the table of extra 80387 mathematical constants. */
@@ -5775,45 +5817,40 @@ ix86_find_base_term (rtx x)
bool
legitimate_constant_p (rtx x)
{
- rtx inner;
-
switch (GET_CODE (x))
{
- case SYMBOL_REF:
- /* TLS symbols are not constant. */
- if (tls_symbolic_operand (x, Pmode))
- return false;
- break;
-
case CONST:
- inner = XEXP (x, 0);
-
- /* Offsets of TLS symbols are never valid.
- Discourage CSE from creating them. */
- if (GET_CODE (inner) == PLUS
- && tls_symbolic_operand (XEXP (inner, 0), Pmode))
- return false;
+ x = XEXP (x, 0);
- if (GET_CODE (inner) == PLUS
- || GET_CODE (inner) == MINUS)
+ if (GET_CODE (x) == PLUS)
{
- if (GET_CODE (XEXP (inner, 1)) != CONST_INT)
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
return false;
- inner = XEXP (inner, 0);
+ x = XEXP (x, 0);
}
/* Only some unspecs are valid as "constants". */
- if (GET_CODE (inner) == UNSPEC)
- switch (XINT (inner, 1))
+ if (GET_CODE (x) == UNSPEC)
+ switch (XINT (x, 1))
{
case UNSPEC_TPOFF:
case UNSPEC_NTPOFF:
- return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
+ return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
case UNSPEC_DTPOFF:
- return local_dynamic_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
+ return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
default:
return false;
}
+
+ /* We must have drilled down to a symbol. */
+ if (!symbolic_operand (x, Pmode))
+ return false;
+ /* FALLTHRU */
+
+ case SYMBOL_REF:
+ /* TLS symbols are never valid. */
+ if (tls_symbolic_operand (x, Pmode))
+ return false;
break;
default:
@@ -10609,10 +10646,11 @@ ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
else if (GET_CODE (operand) == CONST_DOUBLE)
{
REAL_VALUE_TYPE r;
- long l[3];
+ long l[4];
REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
real_to_target (l, &r, mode);
+
/* Do not use shift by 32 to avoid warning on 32bit systems. */
if (HOST_BITS_PER_WIDE_INT >= 64)
parts[0]
@@ -10622,6 +10660,7 @@ ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
DImode);
else
parts[0] = immed_double_const (l[0], l[1], DImode);
+
if (upper_mode == SImode)
parts[1] = gen_int_mode (l[2], SImode);
else if (HOST_BITS_PER_WIDE_INT >= 64)
@@ -14896,10 +14935,29 @@ ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
if (FP_REGNO_P (regno))
return VALID_FP_MODE_P (mode);
if (SSE_REGNO_P (regno))
- return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0);
+ {
+ /* HACK! We didn't change all of the constraints for SSE1 for the
+ scalar modes on the branch. Fortunately, they're not required
+ for ABI compatibility. */
+ if (!TARGET_SSE2 && !VECTOR_MODE_P (mode))
+ return VALID_SSE_REG_MODE (mode);
+
+ /* We implement the move patterns for all vector modes into and
+ out of SSE registers, even when no operation instructions
+ are available. */
+ return (VALID_SSE_REG_MODE (mode)
+ || VALID_SSE2_REG_MODE (mode)
+ || VALID_MMX_REG_MODE (mode)
+ || VALID_MMX_REG_MODE_3DNOW (mode));
+ }
if (MMX_REGNO_P (regno))
- return (TARGET_MMX
- ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0);
+ {
+ /* We implement the move patterns for 3DNOW modes even in MMX mode,
+ so if the register is available at all, then we can move data of
+ the given mode into or out of it. */
+ return (VALID_MMX_REG_MODE (mode)
+ || VALID_MMX_REG_MODE_3DNOW (mode));
+ }
/* We handle both integer and floats in the general purpose registers.
In future we should be able to handle vector modes as well. */
if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
@@ -15235,7 +15293,9 @@ ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
return false;
case FLOAT_EXTEND:
- if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode))
+ if (!TARGET_SSE_MATH
+ || mode == XFmode
+ || (mode == DFmode && !TARGET_SSE2))
*total = 0;
return false;
diff --git a/contrib/gcc/config/i386/i386.h b/contrib/gcc/config/i386/i386.h
index f5be3409c705..8a912d5f1d1c 100644
--- a/contrib/gcc/config/i386/i386.h
+++ b/contrib/gcc/config/i386/i386.h
@@ -447,6 +447,10 @@ extern int x86_prefetch_sse;
redefines this to 1. */
#define TARGET_MACHO 0
+/* Subtargets may reset this to 1 in order to enable 96-bit long double
+ with the rounding mode forced to 53 bits. */
+#define TARGET_96_ROUND_53_LONG_DOUBLE 0
+
/* This macro is similar to `TARGET_SWITCHES' but defines names of
command options that have values. Its definition is an
initializer with a subgrouping for each command option.
@@ -1059,14 +1063,11 @@ do { \
#define VALID_SSE2_REG_MODE(MODE) \
((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
- || (MODE) == V2DImode)
+ || (MODE) == V2DImode || (MODE) == DFmode)
#define VALID_SSE_REG_MODE(MODE) \
((MODE) == TImode || (MODE) == V4SFmode || (MODE) == V4SImode \
- || (MODE) == SFmode || (MODE) == TFmode \
- /* Always accept SSE2 modes so that xmmintrin.h compiles. */ \
- || VALID_SSE2_REG_MODE (MODE) \
- || (TARGET_SSE2 && ((MODE) == DFmode || VALID_MMX_REG_MODE (MODE))))
+ || (MODE) == SFmode || (MODE) == TFmode)
#define VALID_MMX_REG_MODE_3DNOW(MODE) \
((MODE) == V2SFmode || (MODE) == SFmode)
@@ -2990,7 +2991,8 @@ do { \
{"zero_extended_scalar_load_operand", {MEM}}, \
{"vector_move_operand", {CONST_VECTOR, SUBREG, REG, MEM}}, \
{"no_seg_address_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \
- LABEL_REF, SUBREG, REG, MEM, PLUS, MULT}},
+ LABEL_REF, SUBREG, REG, MEM, PLUS, MULT}}, \
+ {"compare_operator", {COMPARE}},
/* A list of predicates that do special things with modes, and so
should not elicit warnings for VOIDmode match_operand. */
diff --git a/contrib/gcc/config/i386/i386.md b/contrib/gcc/config/i386/i386.md
index a190d23378f0..93d9dcdba150 100644
--- a/contrib/gcc/config/i386/i386.md
+++ b/contrib/gcc/config/i386/i386.md
@@ -1261,10 +1261,9 @@
""
"xchg{l}\t%1, %0"
[(set_attr "type" "imov")
+ (set_attr "mode" "SI")
(set_attr "pent_pair" "np")
(set_attr "athlon_decode" "vector")
- (set_attr "mode" "SI")
- (set_attr "modrm" "0")
(set_attr "ppro_uops" "few")])
(define_expand "movhi"
@@ -1377,12 +1376,12 @@
(match_operand:HI 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
- "TARGET_PARTIAL_REG_STALL"
- "xchg{w}\t%1, %0"
+ "!TARGET_PARTIAL_REG_STALL || optimize_size"
+ "xchg{l}\t%k1, %k0"
[(set_attr "type" "imov")
+ (set_attr "mode" "SI")
(set_attr "pent_pair" "np")
- (set_attr "mode" "HI")
- (set_attr "modrm" "0")
+ (set_attr "athlon_decode" "vector")
(set_attr "ppro_uops" "few")])
(define_insn "*swaphi_2"
@@ -1390,12 +1389,12 @@
(match_operand:HI 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
- "! TARGET_PARTIAL_REG_STALL"
- "xchg{l}\t%k1, %k0"
+ "TARGET_PARTIAL_REG_STALL"
+ "xchg{w}\t%1, %0"
[(set_attr "type" "imov")
+ (set_attr "mode" "HI")
(set_attr "pent_pair" "np")
- (set_attr "mode" "SI")
- (set_attr "modrm" "0")
+ (set_attr "athlon_decode" "vector")
(set_attr "ppro_uops" "few")])
(define_expand "movstricthi"
@@ -1543,17 +1542,30 @@
DONE;
})
-(define_insn "*swapqi"
+(define_insn "*swapqi_1"
[(set (match_operand:QI 0 "register_operand" "+r")
(match_operand:QI 1 "register_operand" "+r"))
(set (match_dup 1)
(match_dup 0))]
- ""
- "xchg{b}\t%1, %0"
+ "!TARGET_PARTIAL_REG_STALL || optimize_size"
+ "xchg{l}\t%k1, %k0"
[(set_attr "type" "imov")
+ (set_attr "mode" "SI")
(set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")
+ (set_attr "ppro_uops" "few")])
+
+(define_insn "*swapqi_2"
+ [(set (match_operand:QI 0 "register_operand" "+q")
+ (match_operand:QI 1 "register_operand" "+q"))
+ (set (match_dup 1)
+ (match_dup 0))]
+ "TARGET_PARTIAL_REG_STALL"
+ "xchg{b}\t%1, %0"
+ [(set_attr "type" "imov")
(set_attr "mode" "QI")
- (set_attr "modrm" "0")
+ (set_attr "pent_pair" "np")
+ (set_attr "athlon_decode" "vector")
(set_attr "ppro_uops" "few")])
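The reordered swap patterns encode one sizing rule: QImode (and HImode, above) register swaps are widened to a full 32-bit xchg{l} unless the target suffers partial-register stalls, in which case the narrow form is kept and the constraints tighten from "+r" to "+q" so a byte-addressable register is chosen. Illustratively:

    xchgl %ecx, %eax   # default / -Os form: full-width swap (one byte
                       # when %eax is involved, no partial write)
    xchgb %cl, %al     # TARGET_PARTIAL_REG_STALL form: avoids reading
                       # full registers whose upper bits may be stale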
(define_expand "movstrictqi"
@@ -2108,13 +2120,11 @@
"TARGET_64BIT"
"xchg{q}\t%1, %0"
[(set_attr "type" "imov")
+ (set_attr "mode" "DI")
(set_attr "pent_pair" "np")
(set_attr "athlon_decode" "vector")
- (set_attr "mode" "DI")
- (set_attr "modrm" "0")
(set_attr "ppro_uops" "few")])
-
(define_expand "movsf"
[(set (match_operand:SF 0 "nonimmediate_operand" "")
(match_operand:SF 1 "general_operand" ""))]
@@ -6314,9 +6324,13 @@
}
}
[(set (attr "type")
- (if_then_else (match_operand:QI 2 "incdec_operand" "")
+ (if_then_else (match_operand:QI 1 "incdec_operand" "")
(const_string "incdec")
(const_string "alu1")))
+ (set (attr "memory")
+ (if_then_else (match_operand 1 "memory_operand" "")
+ (const_string "load")
+ (const_string "none")))
(set_attr "mode" "QI")])
(define_insn "*addqi_2"
@@ -7872,18 +7886,21 @@
""
"")
-(define_insn "*testqi_1"
+(define_insn "*testqi_1_maybe_si"
[(set (reg 17)
- (compare (and:QI (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm,r")
- (match_operand:QI 1 "general_operand" "n,n,qn,n"))
- (const_int 0)))]
- "ix86_match_ccmode (insn, CCNOmode)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ (compare
+ (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm,r")
+ (match_operand:QI 1 "general_operand" "n,n,qn,n"))
+ (const_int 0)))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn,
+ GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) >= 0 ? CCNOmode : CCZmode)"
{
if (which_alternative == 3)
{
- if (GET_CODE (operands[1]) == CONST_INT
- && (INTVAL (operands[1]) & 0xffffff00))
+ if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < 0)
operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff);
return "test{l}\t{%1, %k0|%k0, %1}";
}
@@ -7894,6 +7911,21 @@
(set_attr "mode" "QI,QI,QI,SI")
(set_attr "pent_pair" "uv,np,uv,np")])
+(define_insn "*testqi_1"
+ [(set (reg 17)
+ (compare
+ (and:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%!*a,q,qm")
+ (match_operand:QI 1 "general_operand" "n,n,qn"))
+ (const_int 0)))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ && ix86_match_ccmode (insn, CCNOmode)"
+ "test{b}\t{%1, %0|%0, %1}"
+ [(set_attr "type" "test")
+ (set_attr "modrm" "0,1,1")
+ (set_attr "mode" "QI")
+ (set_attr "pent_pair" "uv,np,uv")])
+
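Splitting *testqi_1 into *testqi_1_maybe_si and a byte-only *testqi_1 tracks a flags subtlety in alternative 3, which promotes the byte test to test{l} on the full register: once the QImode constant is negative, the promoted 32-bit result no longer mirrors the byte test's sign flag, so only ZF can be trusted and the pattern must settle for CCZmode; nonnegative constants keep bit 7 of the result clear and full CCNOmode semantics survive. Illustratively:

    testb $0x40, %al    # mask >= 0: bit 7 of the result is clear, so the
                        # widened test{l} form agrees on SF too -> CCNOmode
    testb $0x80, %al    # mask < 0: byte SF = bit 7 of %al, but
    testl $0x80, %eax   # the widened form takes SF from bit 31 = 0,
                        # so only ZF is reliable -> CCZmode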
(define_expand "testqi_ext_ccno_0"
[(set (reg:CCNO 17)
(compare:CCNO
@@ -8012,51 +8044,53 @@
"#")
(define_split
- [(set (reg 17)
- (compare (zero_extract
- (match_operand 0 "nonimmediate_operand" "")
- (match_operand 1 "const_int_operand" "")
- (match_operand 2 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(zero_extract
+ (match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "const_int_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_int 0)]))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(set (reg:CCNO 17) (compare:CCNO (match_dup 3) (const_int 0)))]
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
{
- HOST_WIDE_INT len = INTVAL (operands[1]);
- HOST_WIDE_INT pos = INTVAL (operands[2]);
+ rtx val = operands[2];
+ HOST_WIDE_INT len = INTVAL (operands[3]);
+ HOST_WIDE_INT pos = INTVAL (operands[4]);
HOST_WIDE_INT mask;
enum machine_mode mode, submode;
- mode = GET_MODE (operands[0]);
- if (GET_CODE (operands[0]) == MEM)
+ mode = GET_MODE (val);
+ if (GET_CODE (val) == MEM)
{
/* ??? Combine likes to put non-volatile mem extractions in QImode
no matter the size of the test. So find a mode that works. */
- if (! MEM_VOLATILE_P (operands[0]))
+ if (! MEM_VOLATILE_P (val))
{
mode = smallest_mode_for_size (pos + len, MODE_INT);
- operands[0] = adjust_address (operands[0], mode, 0);
+ val = adjust_address (val, mode, 0);
}
}
- else if (GET_CODE (operands[0]) == SUBREG
- && (submode = GET_MODE (SUBREG_REG (operands[0])),
+ else if (GET_CODE (val) == SUBREG
+ && (submode = GET_MODE (SUBREG_REG (val)),
GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (submode))
&& pos + len <= GET_MODE_BITSIZE (submode))
{
/* Narrow a paradoxical subreg to prevent partial register stalls. */
mode = submode;
- operands[0] = SUBREG_REG (operands[0]);
+ val = SUBREG_REG (val);
}
else if (mode == HImode && pos + len <= 8)
{
/* Small HImode tests can be converted to QImode. */
mode = QImode;
- operands[0] = gen_lowpart (QImode, operands[0]);
+ val = gen_lowpart (QImode, val);
}
mask = ((HOST_WIDE_INT)1 << (pos + len)) - 1;
mask &= ~(((HOST_WIDE_INT)1 << pos) - 1);
- operands[3] = gen_rtx_AND (mode, operands[0], gen_int_mode (mask, mode));
+ operands[2] = gen_rtx_AND (mode, val, gen_int_mode (mask, mode));
})
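This split, and the long run of splits and peephole2s rewritten the same way below, stops matching the hard-coded (reg 17) in a fixed CC mode; flags_reg_operand plus the new compare_operator predicate match any flags-register compare, and match_op_dup reproduces the compare in its original mode instead of forcing CCNOmode into the replacement. Both of these shapes, for example, now go through the one pattern:

    (set (reg:CCNO 17) (compare:CCNO (zero_extract ...) (const_int 0)))
    (set (reg:CCZ 17)  (compare:CCZ  (zero_extract ...) (const_int 0)))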
;; Convert HImode/SImode test instructions with immediate to QImode ones.
@@ -8065,46 +8099,44 @@
;; Do the conversion only post-reload to avoid limiting of the register class
;; to QI regs.
(define_split
- [(set (reg 17)
- (compare
- (and (match_operand 0 "register_operand" "")
- (match_operand 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
"reload_completed
- && QI_REG_P (operands[0])
+ && QI_REG_P (operands[2])
+ && GET_MODE (operands[2]) != QImode
&& ((ix86_match_ccmode (insn, CCZmode)
- && !(INTVAL (operands[1]) & ~(255 << 8)))
+ && !(INTVAL (operands[3]) & ~(255 << 8)))
|| (ix86_match_ccmode (insn, CCNOmode)
- && !(INTVAL (operands[1]) & ~(127 << 8))))
- && GET_MODE (operands[0]) != QImode"
- [(set (reg:CCNO 17)
- (compare:CCNO
- (and:SI (zero_extract:SI (match_dup 0) (const_int 8) (const_int 8))
- (match_dup 1))
- (const_int 0)))]
- "operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_int_mode (INTVAL (operands[1]) >> 8, SImode);")
+ && !(INTVAL (operands[3]) & ~(127 << 8))))"
+ [(set (match_dup 0)
+ (match_op_dup 1
+ [(and:SI (zero_extract:SI (match_dup 2) (const_int 8) (const_int 8))
+ (match_dup 3))
+ (const_int 0)]))]
+ "operands[2] = gen_lowpart (SImode, operands[2]);
+ operands[3] = gen_int_mode (INTVAL (operands[3]) >> 8, SImode);")
(define_split
- [(set (reg 17)
- (compare
- (and (match_operand 0 "nonimmediate_operand" "")
- (match_operand 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
"reload_completed
- && (!REG_P (operands[0]) || ANY_QI_REG_P (operands[0]))
+ && GET_MODE (operands[2]) != QImode
+ && (!REG_P (operands[2]) || ANY_QI_REG_P (operands[2]))
&& ((ix86_match_ccmode (insn, CCZmode)
- && !(INTVAL (operands[1]) & ~255))
+ && !(INTVAL (operands[3]) & ~255))
|| (ix86_match_ccmode (insn, CCNOmode)
- && !(INTVAL (operands[1]) & ~127)))
- && GET_MODE (operands[0]) != QImode"
- [(set (reg:CCNO 17)
- (compare:CCNO
- (and:QI (match_dup 0)
- (match_dup 1))
- (const_int 0)))]
- "operands[0] = gen_lowpart (QImode, operands[0]);
- operands[1] = gen_lowpart (QImode, operands[1]);")
+ && !(INTVAL (operands[3]) & ~127)))"
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:QI (match_dup 2) (match_dup 3))
+ (const_int 0)]))]
+ "operands[2] = gen_lowpart (QImode, operands[2]);
+ operands[3] = gen_lowpart (QImode, operands[3]);")
;; %%% This used to optimize known byte-wide and operations to memory,
@@ -8381,21 +8413,22 @@
[(set_attr "type" "alu1")
(set_attr "mode" "QI")])
-(define_insn "*andqi_2"
+(define_insn "*andqi_2_maybe_si"
[(set (reg 17)
(compare (and:QI
- (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
- (match_operand:QI 2 "general_operand" "qim,qi,i"))
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi,i"))
(const_int 0)))
(set (match_operand:QI 0 "nonimmediate_operand" "=q,qm,*r")
(and:QI (match_dup 1) (match_dup 2)))]
- "ix86_match_ccmode (insn, CCNOmode)
- && ix86_binary_operator_ok (AND, QImode, operands)"
+ "ix86_binary_operator_ok (AND, QImode, operands)
+ && ix86_match_ccmode (insn,
+ GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) >= 0 ? CCNOmode : CCZmode)"
{
if (which_alternative == 2)
{
- if (GET_CODE (operands[2]) == CONST_INT
- && (INTVAL (operands[2]) & 0xffffff00))
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0)
operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
return "and{l}\t{%2, %k0|%k0, %2}";
}
@@ -8404,6 +8437,20 @@
[(set_attr "type" "alu")
(set_attr "mode" "QI,QI,SI")])
+(define_insn "*andqi_2"
+ [(set (reg 17)
+ (compare (and:QI
+ (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qim,qi"))
+ (const_int 0)))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=q,qm")
+ (and:QI (match_dup 1) (match_dup 2)))]
+ "ix86_match_ccmode (insn, CCNOmode)
+ && ix86_binary_operator_ok (AND, QImode, operands)"
+ "and{b}\t{%2, %0|%0, %2}"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
(define_insn "*andqi_2_slp"
[(set (reg 17)
(compare (and:QI
@@ -9567,8 +9614,8 @@
[(parallel [(set (match_operand:SF 0 "nonimmediate_operand" "")
(neg:SF (match_operand:SF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE)
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "if (TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -9641,12 +9688,12 @@
(use (match_operand:V4SF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (xor:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (xor:V4SF (match_dup 1)
+ (match_dup 2)))]
{
- operands[1] = simplify_gen_subreg (TImode, operands[1], SFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V4SFmode, 0);
+ operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ operands[1] = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
if (operands_match_p (operands[0], operands[2]))
{
rtx tmp;
@@ -9664,7 +9711,7 @@
[(set (match_operand:SF 0 "nonimmediate_operand" "=f#r,rm#f")
(neg:SF (match_operand:SF 1 "nonimmediate_operand" "0,0")))
(clobber (reg:CC 17))]
- "TARGET_80387 && !TARGET_SSE
+ "TARGET_80387
&& ix86_unary_operator_ok (NEG, SFmode, operands)"
"#")
@@ -9707,8 +9754,8 @@
[(parallel [(set (match_operand:DF 0 "nonimmediate_operand" "")
(neg:DF (match_operand:DF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE2)
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "if (TARGET_SSE2 && TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -9809,13 +9856,12 @@
(use (match_operand:V2DF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (xor:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (xor:V2DF (match_dup 1)
+ (match_dup 2)))]
{
operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
- operands[1] = simplify_gen_subreg (TImode, operands[1], DFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V2DFmode, 0);
+ operands[1] = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0);
/* Avoid possible reformatting on the operands. */
if (TARGET_SSE_PARTIAL_REGS && !optimize_size)
emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], operands[0]));
@@ -9974,8 +10020,8 @@
[(parallel [(set (match_operand:SF 0 "nonimmediate_operand" "")
(neg:SF (match_operand:SF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE)
+ "TARGET_80387 || TARGET_SSE_MATH"
+ "if (TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -10049,12 +10095,12 @@
(use (match_operand:V4SF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (and:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (and:V4SF (match_dup 1)
+ (match_dup 2)))]
{
- operands[1] = simplify_gen_subreg (TImode, operands[1], SFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V4SFmode, 0);
+ operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ operands[1] = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0);
if (operands_match_p (operands[0], operands[2]))
{
rtx tmp;
@@ -10071,7 +10117,7 @@
[(set (match_operand:SF 0 "nonimmediate_operand" "=f#r,rm#f")
(abs:SF (match_operand:SF 1 "nonimmediate_operand" "0,0")))
(clobber (reg:CC 17))]
- "TARGET_80387 && ix86_unary_operator_ok (ABS, SFmode, operands) && !TARGET_SSE"
+ "TARGET_80387 && ix86_unary_operator_ok (ABS, SFmode, operands)"
"#")
(define_split
@@ -10113,8 +10159,8 @@
[(parallel [(set (match_operand:DF 0 "nonimmediate_operand" "")
(neg:DF (match_operand:DF 1 "nonimmediate_operand" "")))
(clobber (reg:CC 17))])]
- "TARGET_80387"
- "if (TARGET_SSE2)
+ "TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)"
+ "if (TARGET_SSE2 && TARGET_SSE_MATH)
{
/* In case operand is in memory, we will not use SSE. */
if (memory_operand (operands[0], VOIDmode)
@@ -10203,13 +10249,12 @@
(use (match_operand:V2DF 2 "nonimmediate_operand" ""))
(clobber (reg:CC 17))]
"reload_completed && SSE_REG_P (operands[0])"
- [(set (subreg:TI (match_dup 0) 0)
- (and:TI (match_dup 1)
- (match_dup 2)))]
+ [(set (match_dup 0)
+ (and:V2DF (match_dup 1)
+ (match_dup 2)))]
{
operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
- operands[1] = simplify_gen_subreg (TImode, operands[1], DFmode, 0);
- operands[2] = simplify_gen_subreg (TImode, operands[2], V2DFmode, 0);
+ operands[1] = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0);
/* Avoid possible reformatting on the operands. */
if (TARGET_SSE_PARTIAL_REGS && !optimize_size)
emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], operands[0]));
@@ -10383,17 +10428,19 @@
(set_attr "mode" "DI")])
(define_split
- [(set (reg 17)
- (compare (not:DI (match_operand:DI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "nonimmediate_operand" "")
- (not:DI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:DI (match_operand:DI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:DI 1 "nonimmediate_operand" "")
+ (not:DI (match_dup 3)))]
"TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:DI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:DI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2
+ [(xor:DI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:DI (match_dup 3) (const_int -1)))])]
"")
(define_expand "one_cmplsi2"
@@ -10432,17 +10479,18 @@
(set_attr "mode" "SI")])
(define_split
- [(set (reg 17)
- (compare (not:SI (match_operand:SI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:SI 0 "nonimmediate_operand" "")
- (not:SI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:SI (match_operand:SI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:SI 1 "nonimmediate_operand" "")
+ (not:SI (match_dup 3)))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:SI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:SI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:SI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:SI (match_dup 3) (const_int -1)))])]
"")
;; ??? Currently never generated - xor is used instead.
@@ -10459,17 +10507,18 @@
(set_attr "mode" "SI")])
(define_split
- [(set (reg 17)
- (compare (not:SI (match_operand:SI 1 "register_operand" ""))
- (const_int 0)))
- (set (match_operand:DI 0 "register_operand" "")
- (zero_extend:DI (not:SI (match_dup 1))))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:SI (match_operand:SI 3 "register_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:DI 1 "register_operand" "")
+ (zero_extend:DI (not:SI (match_dup 3))))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:SI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (zero_extend:DI (xor:SI (match_dup 1) (const_int -1))))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:SI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (zero_extend:DI (xor:SI (match_dup 3) (const_int -1))))])]
"")
(define_expand "one_cmplhi2"
@@ -10499,17 +10548,18 @@
(set_attr "mode" "HI")])
(define_split
- [(set (reg 17)
- (compare (not:HI (match_operand:HI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:HI 0 "nonimmediate_operand" "")
- (not:HI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:HI (match_operand:HI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (not:HI (match_dup 3)))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:HI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:HI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:HI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:HI (match_dup 3) (const_int -1)))])]
"")
;; %%% Potential partial reg stall on alternative 1. What to do?
@@ -10542,17 +10592,18 @@
(set_attr "mode" "QI")])
(define_split
- [(set (reg 17)
- (compare (not:QI (match_operand:QI 1 "nonimmediate_operand" ""))
- (const_int 0)))
- (set (match_operand:QI 0 "nonimmediate_operand" "")
- (not:QI (match_dup 1)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(not:QI (match_operand:QI 3 "nonimmediate_operand" ""))
+ (const_int 0)]))
+ (set (match_operand:QI 1 "nonimmediate_operand" "")
+ (not:QI (match_dup 3)))]
"ix86_match_ccmode (insn, CCNOmode)"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (xor:QI (match_dup 1) (const_int -1))
- (const_int 0)))
- (set (match_dup 0)
- (xor:QI (match_dup 1) (const_int -1)))])]
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(xor:QI (match_dup 3) (const_int -1))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (xor:QI (match_dup 3) (const_int -1)))])]
"")
;; Arithmetic shift instructions
@@ -17003,7 +17054,8 @@
(clobber (match_operand 6 "" ""))
(clobber (reg:CC 17))]
"!SSE_REG_P (operands[0]) && reload_completed
- && VALID_SSE_REG_MODE (GET_MODE (operands[0]))"
+ && (GET_MODE (operands[0]) == SFmode
+ || (TARGET_SSE2 && GET_MODE (operands[0]) == DFmode))"
[(const_int 0)]
{
ix86_compare_op0 = operands[5];
@@ -17020,22 +17072,60 @@
;; nand op0, op3 - load op3 to op0 if comparison was false
;; or op2, op0 - get the nonzero one into the result.
(define_split
- [(set (match_operand 0 "register_operand" "")
- (if_then_else (match_operator 1 "sse_comparison_operator"
- [(match_operand 4 "register_operand" "")
- (match_operand 5 "nonimmediate_operand" "")])
- (match_operand 2 "register_operand" "")
- (match_operand 3 "register_operand" "")))
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operator 1 "sse_comparison_operator"
+ [(match_operand:SF 4 "register_operand" "")
+ (match_operand:SF 5 "nonimmediate_operand" "")])
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))
(clobber (match_operand 6 "" ""))
(clobber (reg:CC 17))]
"SSE_REG_P (operands[0]) && reload_completed"
[(set (match_dup 4) (match_op_dup 1 [(match_dup 4) (match_dup 5)]))
- (set (subreg:TI (match_dup 2) 0) (and:TI (subreg:TI (match_dup 2) 0)
- (subreg:TI (match_dup 4) 0)))
- (set (subreg:TI (match_dup 4) 0) (and:TI (not:TI (subreg:TI (match_dup 4) 0))
- (subreg:TI (match_dup 3) 0)))
- (set (subreg:TI (match_dup 0) 0) (ior:TI (subreg:TI (match_dup 6) 0)
- (subreg:TI (match_dup 7) 0)))]
+ (set (match_dup 2) (and:V4SF (match_dup 2)
+ (match_dup 8)))
+ (set (match_dup 8) (and:V4SF (not:V4SF (match_dup 8))
+ (match_dup 3)))
+ (set (match_dup 0) (ior:V4SF (match_dup 6)
+ (match_dup 7)))]
+{
+ /* If op2 == op3, op3 would be clobbered before it is used. */
+ if (operands_match_p (operands[2], operands[3]))
+ {
+ emit_move_insn (operands[0], operands[2]);
+ DONE;
+ }
+
+ PUT_MODE (operands[1], GET_MODE (operands[0]));
+ if (operands_match_p (operands[0], operands[4]))
+ operands[6] = operands[4], operands[7] = operands[2];
+ else
+ operands[6] = operands[2], operands[7] = operands[4];
+ operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ operands[2] = simplify_gen_subreg (V4SFmode, operands[2], SFmode, 0);
+ operands[3] = simplify_gen_subreg (V4SFmode, operands[3], SFmode, 0);
+ operands[8] = simplify_gen_subreg (V4SFmode, operands[4], SFmode, 0);
+ operands[6] = simplify_gen_subreg (V4SFmode, operands[6], SFmode, 0);
+ operands[7] = simplify_gen_subreg (V4SFmode, operands[7], SFmode, 0);
+})
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operator 1 "sse_comparison_operator"
+ [(match_operand:DF 4 "register_operand" "")
+ (match_operand:DF 5 "nonimmediate_operand" "")])
+ (match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")))
+ (clobber (match_operand 6 "" ""))
+ (clobber (reg:CC 17))]
+ "SSE_REG_P (operands[0]) && reload_completed"
+ [(set (match_dup 4) (match_op_dup 1 [(match_dup 4) (match_dup 5)]))
+ (set (match_dup 2) (and:V2DF (match_dup 2)
+ (match_dup 8)))
+ (set (match_dup 8) (and:V2DF (not:V2DF (match_dup 8))
+ (match_dup 3)))
+ (set (match_dup 0) (ior:V2DF (match_dup 6)
+ (match_dup 7)))]
{
if (GET_MODE (operands[2]) == DFmode
&& TARGET_SSE_PARTIAL_REGS && !optimize_size)
@@ -17058,6 +17148,12 @@
operands[6] = operands[4], operands[7] = operands[2];
else
operands[6] = operands[2], operands[7] = operands[4];
+ operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
+ operands[2] = simplify_gen_subreg (V2DFmode, operands[2], DFmode, 0);
+ operands[3] = simplify_gen_subreg (V2DFmode, operands[3], DFmode, 0);
+ operands[8] = simplify_gen_subreg (V2DFmode, operands[4], DFmode, 0);
+ operands[6] = simplify_gen_subreg (V2DFmode, operands[6], DFmode, 0);
+ operands[7] = simplify_gen_subreg (V2DFmode, operands[7], DFmode, 0);
})
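Both new mode-specific splits implement the four-step mask trick spelled out in the comment above (cmp, and, nand, or), now as properly typed V4SF/V2DF logical operations instead of TImode subregs of SSE registers. For the SFmode case the emitted sequence is, in effect (registers invented for illustration):

    cmpltss %xmm1, %xmm0   # op4 := all-ones lanes where the test held
    andps   %xmm0, %xmm2   # op2 &= mask        (value kept when true)
    andnps  %xmm3, %xmm0   # op4 := ~mask & op3 (value kept when false)
    orps    %xmm2, %xmm0   # result is whichever operand survived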
;; Special case of conditional move we can handle effectively.
@@ -17144,18 +17240,55 @@
"#")
(define_split
- [(set (match_operand 0 "register_operand" "")
- (if_then_else (match_operator 1 "comparison_operator"
- [(match_operand 4 "nonimmediate_operand" "")
- (match_operand 5 "nonimmediate_operand" "")])
- (match_operand 2 "nonmemory_operand" "")
- (match_operand 3 "nonmemory_operand" "")))]
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand:SF 4 "nonimmediate_operand" "")
+ (match_operand:SF 5 "nonimmediate_operand" "")])
+ (match_operand:SF 2 "nonmemory_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
"SSE_REG_P (operands[0]) && reload_completed
&& (const0_operand (operands[2], GET_MODE (operands[0]))
|| const0_operand (operands[3], GET_MODE (operands[0])))"
[(set (match_dup 0) (match_op_dup 1 [(match_dup 0) (match_dup 5)]))
- (set (subreg:TI (match_dup 0) 0) (and:TI (match_dup 6)
- (match_dup 7)))]
+ (set (match_dup 8) (and:V4SF (match_dup 6) (match_dup 7)))]
+{
+ PUT_MODE (operands[1], GET_MODE (operands[0]));
+ if (!sse_comparison_operator (operands[1], VOIDmode)
+ || !rtx_equal_p (operands[0], operands[4]))
+ {
+ rtx tmp = operands[5];
+ operands[5] = operands[4];
+ operands[4] = tmp;
+ PUT_CODE (operands[1], swap_condition (GET_CODE (operands[1])));
+ }
+ if (!rtx_equal_p (operands[0], operands[4]))
+ abort ();
+ operands[8] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0);
+ if (const0_operand (operands[2], GET_MODE (operands[2])))
+ {
+ operands[7] = operands[3];
+ operands[6] = gen_rtx_NOT (V4SFmode, operands[5]);
+ }
+ else
+ {
+ operands[7] = operands[2];
+ operands[6] = operands[8];
+ }
+ operands[7] = simplify_gen_subreg (V4SFmode, operands[7], SFmode, 0);
+})
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand:DF 4 "nonimmediate_operand" "")
+ (match_operand:DF 5 "nonimmediate_operand" "")])
+ (match_operand:DF 2 "nonmemory_operand" "")
+ (match_operand:DF 3 "nonmemory_operand" "")))]
+ "SSE_REG_P (operands[0]) && reload_completed
+ && (const0_operand (operands[2], GET_MODE (operands[0]))
+ || const0_operand (operands[3], GET_MODE (operands[0])))"
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 0) (match_dup 5)]))
+ (set (match_dup 8) (and:V2DF (match_dup 6) (match_dup 7)))]
{
if (TARGET_SSE_PARTIAL_REGS && !optimize_size
&& GET_MODE (operands[2]) == DFmode)
@@ -17182,19 +17315,18 @@
}
if (!rtx_equal_p (operands[0], operands[4]))
abort ();
- if (const0_operand (operands[2], GET_MODE (operands[0])))
+ operands[8] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0);
+ if (const0_operand (operands[2], GET_MODE (operands[2])))
{
operands[7] = operands[3];
- operands[6] = gen_rtx_NOT (TImode, gen_rtx_SUBREG (TImode, operands[0],
- 0));
+ operands[6] = gen_rtx_NOT (V2DFmode, operands[8]);
}
else
{
operands[7] = operands[2];
- operands[6] = gen_rtx_SUBREG (TImode, operands[0], 0);
+ operands[6] = operands[8];
}
- operands[7] = simplify_gen_subreg (TImode, operands[7],
- GET_MODE (operands[7]), 0);
+ operands[7] = simplify_gen_subreg (V2DFmode, operands[7], DFmode, 0);
})
(define_expand "allocate_stack_worker"
@@ -17319,52 +17451,56 @@
; instruction size is unchanged, except in the %eax case for
; which it is increased by one byte, hence the ! optimize_size.
(define_split
- [(set (reg 17)
- (compare (and (match_operand 1 "aligned_operand" "")
- (match_operand 2 "const_int_operand" ""))
- (const_int 0)))
- (set (match_operand 0 "register_operand" "")
- (and (match_dup 1) (match_dup 2)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 2 "compare_operator"
+ [(and (match_operand 3 "aligned_operand" "")
+ (match_operand 4 "const_int_operand" ""))
+ (const_int 0)]))
+ (set (match_operand 1 "register_operand" "")
+ (and (match_dup 3) (match_dup 4)))]
"! TARGET_PARTIAL_REG_STALL && reload_completed
/* Ensure that the operand will remain sign-extended immediate. */
- && ix86_match_ccmode (insn, INTVAL (operands[2]) >= 0 ? CCNOmode : CCZmode)
+ && ix86_match_ccmode (insn, INTVAL (operands[4]) >= 0 ? CCNOmode : CCZmode)
&& ! optimize_size
- && ((GET_MODE (operands[0]) == HImode && ! TARGET_FAST_PREFIX)
- || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO (and:SI (match_dup 1) (match_dup 2))
- (const_int 0)))
- (set (match_dup 0)
- (and:SI (match_dup 1) (match_dup 2)))])]
- "operands[2]
- = gen_int_mode (INTVAL (operands[2])
- & GET_MODE_MASK (GET_MODE (operands[0])),
- SImode);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_lowpart (SImode, operands[1]);")
+ && ((GET_MODE (operands[1]) == HImode && ! TARGET_FAST_PREFIX)
+ || (GET_MODE (operands[1]) == QImode && TARGET_PROMOTE_QImode))"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 2 [(and:SI (match_dup 3) (match_dup 4))
+ (const_int 0)]))
+ (set (match_dup 1)
+ (and:SI (match_dup 3) (match_dup 4)))])]
+{
+ operands[4]
+ = gen_int_mode (INTVAL (operands[4])
+ & GET_MODE_MASK (GET_MODE (operands[1])), SImode);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[3] = gen_lowpart (SImode, operands[3]);
+})
; Don't promote the QImode tests, as i386 doesn't have encoding of
; the TEST instruction with 32-bit sign-extended immediate and thus
; the instruction size would at least double, which is not what we
; want even with ! optimize_size.
(define_split
- [(set (reg 17)
- (compare (and (match_operand:HI 0 "aligned_operand" "")
- (match_operand:HI 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and (match_operand:HI 2 "aligned_operand" "")
+ (match_operand:HI 3 "const_int_operand" ""))
+ (const_int 0)]))]
"! TARGET_PARTIAL_REG_STALL && reload_completed
/* Ensure that the operand will remain sign-extended immediate. */
- && ix86_match_ccmode (insn, INTVAL (operands[1]) >= 0 ? CCNOmode : CCZmode)
+ && ix86_match_ccmode (insn, INTVAL (operands[3]) >= 0 ? CCNOmode : CCZmode)
&& ! TARGET_FAST_PREFIX
&& ! optimize_size"
- [(set (reg:CCNO 17)
- (compare:CCNO (and:SI (match_dup 0) (match_dup 1))
- (const_int 0)))]
- "operands[1]
- = gen_int_mode (INTVAL (operands[1])
- & GET_MODE_MASK (GET_MODE (operands[0])),
- SImode);
- operands[0] = gen_lowpart (SImode, operands[0]);")
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:SI (match_dup 2) (match_dup 3))
+ (const_int 0)]))]
+{
+ operands[3]
+ = gen_int_mode (INTVAL (operands[3])
+ & GET_MODE_MASK (GET_MODE (operands[2])), SImode);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+})
(define_split
[(set (match_operand 0 "register_operand" "")
@@ -17537,13 +17673,14 @@
;; Don't compare memory with zero, load and use a test instead.
(define_peephole2
- [(set (reg 17)
- (compare (match_operand:SI 0 "memory_operand" "")
- (const_int 0)))
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(match_operand:SI 2 "memory_operand" "")
+ (const_int 0)]))
(match_scratch:SI 3 "r")]
"ix86_match_ccmode (insn, CCNOmode) && ! optimize_size"
- [(set (match_dup 3) (match_dup 0))
- (set (reg:CCNO 17) (compare:CCNO (match_dup 3) (const_int 0)))]
+ [(set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (match_op_dup 1 [(match_dup 3) (const_int 0)]))]
"")
;; NOT is not pairable on Pentium, while XOR is, but one byte longer.
@@ -17607,77 +17744,77 @@
;; versions if we're concerned about partial register stalls.
(define_peephole2
- [(set (reg 17)
- (compare (and:SI (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "immediate_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:SI (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "immediate_operand" ""))
+ (const_int 0)]))]
"ix86_match_ccmode (insn, CCNOmode)
- && (true_regnum (operands[0]) != 0
- || (GET_CODE (operands[1]) == CONST_INT
- && CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'K')))
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
+ && (true_regnum (operands[2]) != 0
+ || (GET_CODE (operands[3]) == CONST_INT
+ && CONST_OK_FOR_LETTER_P (INTVAL (operands[3]), 'K')))
+ && peep2_reg_dead_p (1, operands[2])"
[(parallel
- [(set (reg:CCNO 17)
- (compare:CCNO (and:SI (match_dup 0)
- (match_dup 1))
- (const_int 0)))
- (set (match_dup 0)
- (and:SI (match_dup 0) (match_dup 1)))])]
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:SI (match_dup 2) (match_dup 3))
+ (const_int 0)]))
+ (set (match_dup 2)
+ (and:SI (match_dup 2) (match_dup 3)))])]
"")
;; We don't need to handle HImode case, because it will be promoted to SImode
;; on ! TARGET_PARTIAL_REG_STALL
(define_peephole2
- [(set (reg 17)
- (compare (and:QI (match_operand:QI 0 "register_operand" "")
- (match_operand:QI 1 "immediate_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:QI (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 3 "immediate_operand" ""))
+ (const_int 0)]))]
"! TARGET_PARTIAL_REG_STALL
&& ix86_match_ccmode (insn, CCNOmode)
- && true_regnum (operands[0]) != 0
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
+ && true_regnum (operands[2]) != 0
+ && peep2_reg_dead_p (1, operands[2])"
[(parallel
- [(set (reg:CCNO 17)
- (compare:CCNO (and:QI (match_dup 0)
- (match_dup 1))
- (const_int 0)))
- (set (match_dup 0)
- (and:QI (match_dup 0) (match_dup 1)))])]
+ [(set (match_dup 0)
+ (match_op_dup 1 [(and:QI (match_dup 2) (match_dup 3))
+ (const_int 0)]))
+ (set (match_dup 2)
+ (and:QI (match_dup 2) (match_dup 3)))])]
"")
(define_peephole2
- [(set (reg 17)
- (compare
- (and:SI
- (zero_extract:SI
- (match_operand 0 "ext_register_operand" "")
- (const_int 8)
- (const_int 8))
- (match_operand 1 "const_int_operand" ""))
- (const_int 0)))]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(and:SI
+ (zero_extract:SI
+ (match_operand 2 "ext_register_operand" "")
+ (const_int 8)
+ (const_int 8))
+ (match_operand 3 "const_int_operand" ""))
+ (const_int 0)]))]
"! TARGET_PARTIAL_REG_STALL
&& ix86_match_ccmode (insn, CCNOmode)
- && true_regnum (operands[0]) != 0
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCNO 17)
- (compare:CCNO
- (and:SI
- (zero_extract:SI
- (match_dup 0)
- (const_int 8)
- (const_int 8))
- (match_dup 1))
- (const_int 0)))
- (set (zero_extract:SI (match_dup 0)
+ && true_regnum (operands[2]) != 0
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 1
+ [(and:SI
+ (zero_extract:SI
+ (match_dup 2)
+ (const_int 8)
+ (const_int 8))
+ (match_dup 3))
+ (const_int 0)]))
+ (set (zero_extract:SI (match_dup 2)
(const_int 8)
(const_int 8))
(and:SI
(zero_extract:SI
- (match_dup 0)
+ (match_dup 2)
(const_int 8)
(const_int 8))
- (match_dup 1)))])]
+ (match_dup 3)))])]
"")
;; Don't do logical operations with memory inputs.
@@ -17979,66 +18116,20 @@
"")
;; Convert compares with 1 to shorter inc/dec operations when CF is not
-;; required and register dies.
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:SI 0 "register_operand" "")
- (match_operand:SI 1 "incdec_operand" "")))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (match_dup 1)))
- (clobber (match_dup 0))])]
- "")
-
+;; required and register dies. Similarly for 128 to plus -128.
(define_peephole2
- [(set (reg 17)
- (compare (match_operand:HI 0 "register_operand" "")
- (match_operand:HI 1 "incdec_operand" "")))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (match_dup 1)))
- (clobber (match_dup 0))])]
- "")
-
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:QI 0 "register_operand" "")
- (match_operand:QI 1 "incdec_operand" "")))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (match_dup 1)))
- (clobber (match_dup 0))])]
- "")
-
-;; Convert compares with 128 to shorter add -128
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:SI 0 "register_operand" "")
- (const_int 128)))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (const_int 128)))
- (clobber (match_dup 0))])]
- "")
-
-(define_peephole2
- [(set (reg 17)
- (compare (match_operand:HI 0 "register_operand" "")
- (const_int 128)))]
- "ix86_match_ccmode (insn, CCGCmode)
- && find_regno_note (insn, REG_DEAD, true_regnum (operands[0]))"
- [(parallel [(set (reg:CCGC 17)
- (compare:CCGC (match_dup 0)
- (const_int 128)))
- (clobber (match_dup 0))])]
+ [(set (match_operand 0 "flags_reg_operand" "")
+ (match_operator 1 "compare_operator"
+ [(match_operand 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" "")]))]
+ "(INTVAL (operands[3]) == -1
+ || INTVAL (operands[3]) == 1
+ || INTVAL (operands[3]) == 128)
+ && ix86_match_ccmode (insn, CCGCmode)
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (match_op_dup 1 [(match_dup 2) (match_dup 3)]))
+ (clobber (match_dup 2))])]
"")
(define_peephole2
@@ -18326,7 +18417,7 @@
{
if (constant_call_address_operand (operands[1], QImode))
return "call\t%P1";
- return "call\t%*%1";
+ return "call\t%A1";
}
[(set_attr "type" "callv")])
@@ -18338,7 +18429,7 @@
{
if (constant_call_address_operand (operands[1], QImode))
return "jmp\t%P1";
- return "jmp\t%*%1";
+ return "jmp\t%A1";
}
[(set_attr "type" "callv")])
@@ -18422,10 +18513,11 @@
;; Moves for SSE/MMX regs.
-(define_insn "movv4sf_internal"
+(define_insn "*movv4sf_internal"
[(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V4SF 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE"
+ "TARGET_SSE
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
xorps\t%0, %0
movaps\t{%1, %0|%0, %1}
@@ -18436,7 +18528,7 @@
(define_split
[(set (match_operand:V4SF 0 "register_operand" "")
(match_operand:V4SF 1 "zero_extended_scalar_load_operand" ""))]
- "TARGET_SSE"
+ "TARGET_SSE && reload_completed"
[(set (match_dup 0)
(vec_merge:V4SF
(vec_duplicate:V4SF (match_dup 1))
@@ -18447,10 +18539,11 @@
operands[2] = CONST0_RTX (V4SFmode);
})
-(define_insn "movv4si_internal"
+(define_insn "*movv4si_internal"
[(set (match_operand:V4SI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V4SI 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE"
+ "TARGET_SSE
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
{
@@ -18487,10 +18580,11 @@
(const_string "TI"))]
(const_string "TI")))])
-(define_insn "movv2di_internal"
+(define_insn "*movv2di_internal"
[(set (match_operand:V2DI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V2DI 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE"
+ "TARGET_SSE
+ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
{
@@ -18530,7 +18624,7 @@
(define_split
[(set (match_operand:V2DF 0 "register_operand" "")
(match_operand:V2DF 1 "zero_extended_scalar_load_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE2 && reload_completed"
[(set (match_dup 0)
(vec_merge:V2DF
(vec_duplicate:V2DF (match_dup 1))
@@ -18541,52 +18635,80 @@
operands[2] = CONST0_RTX (V2DFmode);
})
-(define_insn "movv8qi_internal"
- [(set (match_operand:V8QI 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V8QI 1 "vector_move_operand" "C,ym,y"))]
+(define_insn "*movv2si_internal"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V2SI 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
"TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxmov")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
-(define_insn "movv4hi_internal"
- [(set (match_operand:V4HI 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V4HI 1 "vector_move_operand" "C,ym,y"))]
+(define_insn "*movv4hi_internal"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V4HI 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
"TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxmov")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
-(define_insn "movv2si_internal"
- [(set (match_operand:V2SI 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V2SI 1 "vector_move_operand" "C,ym,y"))]
+(define_insn "*movv8qi_internal"
+ [(set (match_operand:V8QI 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V8QI 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
"TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxcvt")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
-(define_insn "movv2sf_internal"
- [(set (match_operand:V2SF 0 "nonimmediate_operand" "=y,y,m")
- (match_operand:V2SF 1 "vector_move_operand" "C,ym,y"))]
- "TARGET_3DNOW
+(define_insn "*movv2sf_internal"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand"
+ "=y,y ,m,!y,!*Y,*x,?*x,?m")
+ (match_operand:V2SF 1 "vector_move_operand"
+ "C ,ym,y,*Y,y ,C ,*xm,*x"))]
+ "TARGET_MMX
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
"@
pxor\t%0, %0
movq\t{%1, %0|%0, %1}
+ movq\t{%1, %0|%0, %1}
+ movdq2q\t{%1, %0|%0, %1}
+ movq2dq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
+ movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}"
- [(set_attr "type" "mmxcvt")
+ [(set_attr "type" "mmxmov,mmxmov,mmxmov,ssecvt,ssecvt,ssemov,ssemov,ssemov")
(set_attr "mode" "DI")])
(define_expand "movti"
@@ -18606,17 +18728,14 @@
(match_operand:TF 1 "nonimmediate_operand" ""))]
"TARGET_64BIT"
{
- if (TARGET_64BIT)
- ix86_expand_move (TFmode, operands);
- else
- ix86_expand_vector_move (TFmode, operands);
+ ix86_expand_move (TFmode, operands);
DONE;
})
-(define_insn "movv2df_internal"
+(define_insn "*movv2df_internal"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V2DF 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE2
+ "TARGET_SSE
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
@@ -18638,7 +18757,9 @@
}
[(set_attr "type" "ssemov")
(set (attr "mode")
- (cond [(eq_attr "alternative" "0,1")
+ (cond [(eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (const_string "V4SF")
+ (eq_attr "alternative" "0,1")
(if_then_else
(ne (symbol_ref "optimize_size")
(const_int 0))
@@ -18654,10 +18775,10 @@
(const_string "V2DF"))]
(const_string "V2DF")))])
-(define_insn "movv8hi_internal"
+(define_insn "*movv8hi_internal"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V8HI 1 "vector_move_operand" "C,xm,x"))]
- "TARGET_SSE2
+ "TARGET_SSE
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
@@ -18695,10 +18816,10 @@
(const_string "TI"))]
(const_string "TI")))])
-(define_insn "movv16qi_internal"
+(define_insn "*movv16qi_internal"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,x,m")
- (match_operand:V16QI 1 "nonimmediate_operand" "C,xm,x"))]
- "TARGET_SSE2
+ (match_operand:V16QI 1 "vector_move_operand" "C,xm,x"))]
+ "TARGET_SSE
&& (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
{
switch (which_alternative)
@@ -18739,7 +18860,7 @@
(define_expand "movv2df"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "")
(match_operand:V2DF 1 "nonimmediate_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE"
{
ix86_expand_vector_move (V2DFmode, operands);
DONE;
@@ -18748,7 +18869,7 @@
(define_expand "movv8hi"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "")
(match_operand:V8HI 1 "nonimmediate_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE"
{
ix86_expand_vector_move (V8HImode, operands);
DONE;
@@ -18757,7 +18878,7 @@
(define_expand "movv16qi"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "")
(match_operand:V16QI 1 "nonimmediate_operand" ""))]
- "TARGET_SSE2"
+ "TARGET_SSE"
{
ix86_expand_vector_move (V16QImode, operands);
DONE;
@@ -18820,7 +18941,7 @@
(define_expand "movv2sf"
[(set (match_operand:V2SF 0 "nonimmediate_operand" "")
(match_operand:V2SF 1 "nonimmediate_operand" ""))]
- "TARGET_3DNOW"
+ "TARGET_MMX"
{
ix86_expand_vector_move (V2SFmode, operands);
DONE;
@@ -18841,19 +18962,19 @@
(define_insn "*pushv2di"
[(set (match_operand:V2DI 0 "push_operand" "=<")
(match_operand:V2DI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv8hi"
[(set (match_operand:V8HI 0 "push_operand" "=<")
(match_operand:V8HI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv16qi"
[(set (match_operand:V16QI 0 "push_operand" "=<")
(match_operand:V16QI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv4sf"
@@ -18865,7 +18986,7 @@
(define_insn "*pushv4si"
[(set (match_operand:V4SI 0 "push_operand" "=<")
(match_operand:V4SI 1 "register_operand" "x"))]
- "TARGET_SSE2"
+ "TARGET_SSE"
"#")
(define_insn "*pushv2si"
@@ -18889,7 +19010,7 @@
(define_insn "*pushv2sf"
[(set (match_operand:V2SF 0 "push_operand" "=<")
(match_operand:V2SF 1 "register_operand" "y"))]
- "TARGET_3DNOW"
+ "TARGET_MMX"
"#")
(define_split
@@ -18915,7 +19036,7 @@
operands[3] = GEN_INT (-GET_MODE_SIZE (GET_MODE (operands[0])));")
-(define_insn "movti_internal"
+(define_insn "*movti_internal"
[(set (match_operand:TI 0 "nonimmediate_operand" "=x,x,m")
(match_operand:TI 1 "vector_move_operand" "C,xm,x"))]
"TARGET_SSE && !TARGET_64BIT
@@ -19462,26 +19583,16 @@
;; of DImode subregs again!
;; SSE1 single precision floating point logical operation
(define_expand "sse_andv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (and:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (and:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
"")
(define_insn "*sse_andv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "andps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_andsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (and:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"andps\t{%2, %0|%0, %2}"
@@ -19489,51 +19600,32 @@
(set_attr "mode" "V4SF")])
(define_expand "sse_nandv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (and:TI (not:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0))
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (and:V4SF (not:V4SF (match_operand:V4SF 1 "register_operand" ""))
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
"")
(define_insn "*sse_nandv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE"
- "andnps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_nandsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (and:V4SF (not:V4SF (match_operand:V4SF 1 "register_operand" "0"))
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE"
"andnps\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "V4SF")])
(define_expand "sse_iorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (ior:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (ior:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
"")
(define_insn "*sse_iorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "orps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_iorsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (ior:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"orps\t{%2, %0|%0, %2}"
@@ -19541,27 +19633,16 @@
(set_attr "mode" "V4SF")])
(define_expand "sse_xorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "") 0)
- (xor:TI (subreg:TI (match_operand:V4SF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V4SF 2 "nonimmediate_operand" "") 0)))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (xor:V4SF (match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
"")
(define_insn "*sse_xorv4sf3"
- [(set (subreg:TI (match_operand:V4SF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "xorps\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*sse_xorsf3"
- [(set (subreg:TI (match_operand:SF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V4SF 0 "register_operand" "=x")
+ (xor:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "%0")
+ (match_operand:V4SF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"xorps\t{%2, %0|%0, %2}"
@@ -19571,26 +19652,16 @@
;; SSE2 double precision floating point logical operation
(define_expand "sse2_andv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (and:TI (subreg:TI (match_operand:V2DF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (and:V2DF (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_andv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "andpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse2_andv2df3"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=x") 0)
- (and:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (and:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"andpd\t{%2, %0|%0, %2}"
@@ -19598,51 +19669,32 @@
(set_attr "mode" "V2DF")])
(define_expand "sse2_nandv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (and:TI (not:TI (subreg:TI (match_operand:V2DF 1 "register_operand" "") 0))
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (and:V2DF (not:V2DF (match_operand:V2DF 1 "register_operand" ""))
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_nandv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2"
- "andnpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse_nandti3_df"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=Y") 0)
- (and:TI (not:TI (match_operand:TI 1 "register_operand" "0"))
- (match_operand:TI 2 "nonimmediate_operand" "Ym")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (and:V2DF (not:V2DF (match_operand:V2DF 1 "register_operand" "0"))
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2"
"andnpd\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "V2DF")])
(define_expand "sse2_iorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (ior:TI (subreg:TI (match_operand:V2DF 1 "register_operand" "") 0)
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (ior:V2DF (match_operand:V2DF 1 "register_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_iorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "orpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse2_iordf3"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=x") 0)
- (ior:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (ior:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"orpd\t{%2, %0|%0, %2}"
@@ -19650,26 +19702,16 @@
(set_attr "mode" "V2DF")])
(define_expand "sse2_xorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "") 0)
- (xor:TI (subreg:TI (match_operand:V2DF 1 "nonimmediate_operand" "") 0)
- (subreg:TI (match_operand:V2DF 2 "nonimmediate_operand" "") 0)))]
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (xor:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "")
+ (match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"")
(define_insn "*sse2_xorv2df3"
- [(set (subreg:TI (match_operand:V2DF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2
- && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
- "xorpd\t{%2, %0|%0, %2}"
- [(set_attr "type" "sselog")
- (set_attr "mode" "V2DF")])
-
-(define_insn "*sse2_xordf3"
- [(set (subreg:TI (match_operand:DF 0 "register_operand" "=x") 0)
- (xor:TI (match_operand:TI 1 "nonimmediate_operand" "%0")
- (match_operand:TI 2 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:V2DF 0 "register_operand" "=x")
+ (xor:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "%0")
+ (match_operand:V2DF 2 "nonimmediate_operand" "xm")))]
"TARGET_SSE2
&& (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)"
"xorpd\t{%2, %0|%0, %2}"
diff --git a/contrib/gcc/config/i386/t-rtems-i386 b/contrib/gcc/config/i386/t-rtems-i386
index b57f4fd82dfa..d32928c53aa6 100644
--- a/contrib/gcc/config/i386/t-rtems-i386
+++ b/contrib/gcc/config/i386/t-rtems-i386
@@ -36,17 +36,17 @@ xp-bit.c: $(srcdir)/config/fp-bit.c
echo '#define EXTENDED_FLOAT_STUBS' > xp-bit.c
cat $(srcdir)/config/fp-bit.c >> xp-bit.c
-MULTILIB_OPTIONS = mcpu=i486/mcpu=pentium/mcpu=pentiumpro/mcpu=k6/mcpu=athlon \
+MULTILIB_OPTIONS = mtune=i486/mtune=pentium/mtune=pentiumpro/mtune=k6/mtune=athlon \
msoft-float mno-fp-ret-in-387
MULTILIB_DIRNAMES= m486 mpentium mpentiumpro k6 athlon soft-float nofp
MULTILIB_MATCHES = msoft-float=mno-m80387
MULTILIB_EXCEPTIONS = \
mno-fp-ret-in-387 \
-mcpu=i486/*mno-fp-ret-in-387* \
-mcpu=pentium/*msoft-float* mcpu=pentium/*mno-fp-ret-in-387* \
-mcpu=pentiumpro/*msoft-float* mcpu=pentiumpro/*mno-fp-ret-in-387* \
-mcpu=k6/*msoft-float* mcpu=k6/*mno-fp-ret-in-387* \
-mcpu=athlon/*msoft-float* mcpu=athlon/*mno-fp-ret-in-387*
+mtune=i486/*mno-fp-ret-in-387* \
+mtune=pentium/*msoft-float* mtune=pentium/*mno-fp-ret-in-387* \
+mtune=pentiumpro/*msoft-float* mtune=pentiumpro/*mno-fp-ret-in-387* \
+mtune=k6/*msoft-float* mtune=k6/*mno-fp-ret-in-387* \
+mtune=athlon/*msoft-float* mtune=athlon/*mno-fp-ret-in-387*
EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
diff --git a/contrib/gcc/config/i386/xmmintrin.h b/contrib/gcc/config/i386/xmmintrin.h
index 1bc887830de7..921806f706a0 100644
--- a/contrib/gcc/config/i386/xmmintrin.h
+++ b/contrib/gcc/config/i386/xmmintrin.h
@@ -38,10 +38,10 @@
#include <mmintrin.h>
/* The data type intended for user use. */
-typedef int __m128 __attribute__ ((__mode__(__V4SF__)));
+typedef float __m128 __attribute__ ((__mode__(__V4SF__)));
/* Internal data types for implementing the intrinsics. */
-typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));
+typedef float __v4sf __attribute__ ((__mode__(__V4SF__)));
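The element type now matches reality: V4SF is four floats. Under GCC's vector extension the element type decides the semantics of lane-wise operations, so the distinction is visible to users (sketch; the typedef name here is illustrative):

typedef float v4f __attribute__ ((__mode__ (__V4SF__)));

v4f
twice (v4f v)
{
  return v + v;   /* lane-wise *float* addition, as expected for V4SF;
                     with an int element type the arithmetic would be
                     treated as integer  */
}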
/* Create a selector for use with the SHUFPS instruction. */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
diff --git a/contrib/gcc/config/ia64/ia64.c b/contrib/gcc/config/ia64/ia64.c
index 19c5e92b161f..c215b19d868a 100644
--- a/contrib/gcc/config/ia64/ia64.c
+++ b/contrib/gcc/config/ia64/ia64.c
@@ -390,20 +390,55 @@ call_operand (rtx op, enum machine_mode mode)
int
sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
+ HOST_WIDE_INT offset = 0, size = 0;
+
switch (GET_CODE (op))
{
case CONST:
- if (GET_CODE (XEXP (op, 0)) != PLUS
- || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
break;
- op = XEXP (XEXP (op, 0), 0);
+ offset = INTVAL (XEXP (op, 1));
+ op = XEXP (op, 0);
/* FALLTHRU */
case SYMBOL_REF:
if (CONSTANT_POOL_ADDRESS_P (op))
- return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
+ {
+ size = GET_MODE_SIZE (get_pool_mode (op));
+ if (size > ia64_section_threshold)
+ return false;
+ }
else
- return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);
+ {
+ tree t;
+
+ if (!SYMBOL_REF_LOCAL_P (op) || !SYMBOL_REF_SMALL_P (op))
+ return false;
+
+ /* Note that in addition to DECLs, we can get various forms
+ of constants here. */
+ t = SYMBOL_REF_DECL (op);
+ if (DECL_P (t))
+ t = DECL_SIZE_UNIT (t);
+ else
+ t = TYPE_SIZE_UNIT (TREE_TYPE (t));
+ if (t && host_integerp (t, 0))
+ {
+ size = tree_low_cst (t, 0);
+ if (size < 0)
+ size = 0;
+ }
+ }
+
+ /* Deny the stupid user trick of addressing outside the object. Such
+ things quickly result in GPREL22 relocation overflows. Of course,
+ they're also highly undefined. From a pure pedant's point of view
+ they deserve a slap on the wrist (such as provided by a relocation
+ overflow), but that just leads to bugzilla noise. */
+ return (offset >= 0 && offset <= size);
default:
break;
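Stripped of the RTL walking, the rule the rewritten predicate enforces is the final range test. A stand-alone rendering (function name hypothetical):

static int
sdata_offset_ok (long offset, long size)
{
  /* A symbol+offset address qualifies for the GP-relative small-data
     form only while it points into the object; stepping outside is
     what produces the GPREL22 relocation overflows noted above.  */
  return offset >= 0 && offset <= size;
}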
@@ -3154,10 +3189,13 @@ ia64_expand_epilogue (int sibcall_p)
preserve those input registers used as arguments to the sibling call.
It is unclear how to compute that number here. */
if (current_frame_info.n_input_regs != 0)
- emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
- GEN_INT (0), GEN_INT (0),
- GEN_INT (current_frame_info.n_input_regs),
- GEN_INT (0)));
+ {
+ rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
+ insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
+ const0_rtx, const0_rtx,
+ n_inputs, const0_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
}
}
@@ -3283,15 +3321,16 @@ static bool
ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
if (size == POINTER_SIZE / BITS_PER_UNIT
- && aligned_p
&& !(TARGET_NO_PIC || TARGET_AUTO_PIC)
&& GET_CODE (x) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (x))
{
- if (POINTER_SIZE == 32)
- fputs ("\tdata4\t@fptr(", asm_out_file);
- else
- fputs ("\tdata8\t@fptr(", asm_out_file);
+ static const char * const directive[2][2] = {
+ /* 64-bit pointer */ /* 32-bit pointer */
+ { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
+ { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
+ };
+ fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
output_addr_const (asm_out_file, x);
fputs (")\n", asm_out_file);
return true;
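The 2x2 table replaces an if/else that handled only the aligned case, so unaligned pointer-sized function addresses now get @fptr annotations too, via the .ua directive variants. The same lookup, reduced to a runnable sketch:

#include <stdio.h>

int
main (void)
{
  static const char *const directive[2][2] = {
    { "data8.ua", "data4.ua" },   /* unaligned */
    { "data8",    "data4"    }    /* aligned   */
  };
  int aligned_p = 0;              /* example inputs */
  int pointer_size = 64;
  printf ("\t%s\t@fptr(f)\n",
          directive[aligned_p != 0][pointer_size == 32]);
  return 0;
}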
@@ -3917,6 +3956,12 @@ ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
static bool
ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
+ /* We can't perform a sibcall if the current function has the syscall_linkage
+ attribute. */
+ if (lookup_attribute ("syscall_linkage",
+ TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
+ return false;
+
/* We must always return with our current GP. This means we can
only sibcall to functions defined in the current module. */
return decl && (*targetm.binds_local_p) (decl);
@@ -7782,13 +7827,24 @@ process_set (FILE *asm_out_file, rtx pat)
{
dest_regno = REGNO (dest);
- /* If this isn't the final destination for ar.pfs, the alloc
- shouldn't have been marked frame related. */
- if (dest_regno != current_frame_info.reg_save_ar_pfs)
- abort ();
-
- fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
- ia64_dbx_register_number (dest_regno));
+ /* If this is the final destination for ar.pfs, then this must
+ be the alloc in the prologue. */
+ if (dest_regno == current_frame_info.reg_save_ar_pfs)
+ fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
+ ia64_dbx_register_number (dest_regno));
+ else
+ {
+ /* This must be an alloc before a sibcall. We must drop the
+ old frame info. The easiest way to drop the old frame
+ info is to ensure we had a ".restore sp" directive
+ followed by a new prologue. If the procedure doesn't
+ have a memory-stack frame, we'll issue a dummy ".restore
+ sp" now. */
+ if (current_frame_info.total_size == 0 && !frame_pointer_needed)
+ /* If we haven't done process_epilogue () yet, do it now.  */
+ process_epilogue ();
+ fprintf (asm_out_file, "\t.prologue\n");
+ }
return 1;
}
diff --git a/contrib/gcc/config/ia64/t-glibc b/contrib/gcc/config/ia64/t-glibc
index a1056628b501..df4fe9c4404a 100644
--- a/contrib/gcc/config/ia64/t-glibc
+++ b/contrib/gcc/config/ia64/t-glibc
@@ -1 +1,3 @@
-LIB2ADDEH += $(srcdir)/config/ia64/fde-glibc.c
+# Use the system libunwind library on IA-64 GLIBC-based systems.
+LIB2ADDEH = $(srcdir)/unwind-sjlj.c $(srcdir)/unwind-c.c \
+ $(srcdir)/unwind-compat.c
diff --git a/contrib/gcc/config/ia64/t-glibc-libunwind b/contrib/gcc/config/ia64/t-glibc-libunwind
new file mode 100644
index 000000000000..df78f1d094f8
--- /dev/null
+++ b/contrib/gcc/config/ia64/t-glibc-libunwind
@@ -0,0 +1,4 @@
+# Build libunwind for IA-64 GLIBC-based systems.
+LIBUNWIND = $(srcdir)/config/ia64/fde-glibc.c \
+ $(srcdir)/config/ia64/unwind-ia64.c
+LIBUNWINDDEP = unwind.inc
diff --git a/contrib/gcc/config/ia64/t-hpux b/contrib/gcc/config/ia64/t-hpux
index 597c2acbe2ae..d89f174592c0 100644
--- a/contrib/gcc/config/ia64/t-hpux
+++ b/contrib/gcc/config/ia64/t-hpux
@@ -23,6 +23,8 @@ LIBGCC1_TEST =
# We do not want to include the EH stuff that linux uses, we want to use
# the HP-UX libunwind library.
+T_CFLAGS += -DUSE_LIBUNWIND_EXCEPTIONS
+
LIB2ADDEH =
SHLIB_EXT = .so
diff --git a/contrib/gcc/config/ia64/unwind-ia64.c b/contrib/gcc/config/ia64/unwind-ia64.c
index d981d8c3e91a..a49652e99ab8 100644
--- a/contrib/gcc/config/ia64/unwind-ia64.c
+++ b/contrib/gcc/config/ia64/unwind-ia64.c
@@ -37,6 +37,7 @@
#include "tm.h"
#include "unwind.h"
#include "unwind-ia64.h"
+#include "unwind-compat.h"
#include "ia64intrin.h"
/* This isn't thread safe, but nice for occasional tests. */
@@ -2274,6 +2275,8 @@ uw_install_context (struct _Unwind_Context *current __attribute__((unused)),
"(p6) ldf.fill f22 = [r28] \n\t"
"cmp.ne p7, p0 = r0, r29 \n\t"
";; \n\t"
+ "ld8 r27 = [r20], 8 \n\t"
+ ";; \n\t"
"ld8 r28 = [r20], 8 \n\t"
"(p7) ldf.fill f23 = [r29] \n\t"
"cmp.ne p6, p0 = r0, r22 \n\t"
@@ -2381,4 +2384,24 @@ uw_identify_context (struct _Unwind_Context *context)
}
#include "unwind.inc"
+
+#if defined (USE_GAS_SYMVER) && defined (SHARED) && defined (USE_LIBUNWIND_EXCEPTIONS)
+alias (_Unwind_Backtrace);
+alias (_Unwind_DeleteException);
+alias (_Unwind_FindEnclosingFunction);
+alias (_Unwind_FindTableEntry);
+alias (_Unwind_ForcedUnwind);
+alias (_Unwind_GetBSP);
+alias (_Unwind_GetCFA);
+alias (_Unwind_GetGR);
+alias (_Unwind_GetIP);
+alias (_Unwind_GetLanguageSpecificData);
+alias (_Unwind_GetRegionStart);
+alias (_Unwind_RaiseException);
+alias (_Unwind_Resume);
+alias (_Unwind_Resume_or_Rethrow);
+alias (_Unwind_SetGR);
+alias (_Unwind_SetIP);
+#endif
+
#endif
diff --git a/contrib/gcc/config/ia64/unwind-ia64.h b/contrib/gcc/config/ia64/unwind-ia64.h
index b56b38c45c41..053829f11e94 100644
--- a/contrib/gcc/config/ia64/unwind-ia64.h
+++ b/contrib/gcc/config/ia64/unwind-ia64.h
@@ -28,4 +28,5 @@ struct unw_table_entry
extern struct unw_table_entry *
_Unwind_FindTableEntry (void *pc, unsigned long *segment_base,
- unsigned long *gp);
+ unsigned long *gp)
+ __attribute__ ((__visibility__ ("hidden")));
diff --git a/contrib/gcc/config/rs6000/aix.h b/contrib/gcc/config/rs6000/aix.h
index f189407b3f48..b14107f523a6 100644
--- a/contrib/gcc/config/rs6000/aix.h
+++ b/contrib/gcc/config/rs6000/aix.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler,
for IBM RS/6000 POWER running AIX.
- Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
@@ -175,15 +175,15 @@
#define JUMP_TABLES_IN_TEXT_SECTION 1
/* Enable AIX XL compiler calling convention breakage compatibility. */
-#undef TARGET_XL_CALL
-#define MASK_XL_CALL 0x40000000
-#define TARGET_XL_CALL (target_flags & MASK_XL_CALL)
+#undef TARGET_XL_COMPAT
+#define MASK_XL_COMPAT 0x40000000
+#define TARGET_XL_COMPAT (target_flags & MASK_XL_COMPAT)
#undef SUBTARGET_SWITCHES
#define SUBTARGET_SWITCHES \
- {"xl-call", MASK_XL_CALL, \
- N_("Always pass floating-point arguments in memory") }, \
- {"no-xl-call", - MASK_XL_CALL, \
- N_("Don't always pass floating-point arguments in memory") }, \
+ {"xl-compat", MASK_XL_COMPAT, \
+ N_("Conform more closely to IBM XLC semantics") }, \
+ {"no-xl-compat", - MASK_XL_COMPAT, \
+ N_("Default GCC semantics that differ from IBM XLC") }, \
SUBSUBTARGET_SWITCHES
#define SUBSUBTARGET_SWITCHES
@@ -209,7 +209,7 @@
code that does the save/restore is generated by the linker, so
we have no good way to determine at compile time what to do. */
-#ifdef __powerpc64__
+#ifdef __64BIT__
#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
do { \
if ((FS)->regs.reg[2].how == REG_UNSAVED) \
diff --git a/contrib/gcc/config/rs6000/aix41.h b/contrib/gcc/config/rs6000/aix41.h
index 373c10c22ffa..542f92841cb8 100644
--- a/contrib/gcc/config/rs6000/aix41.h
+++ b/contrib/gcc/config/rs6000/aix41.h
@@ -98,3 +98,7 @@
#undef RS6000_CALL_GLUE
#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+/* The IBM AIX 4.x assembler doesn't support forward references in
+ .set directives. We handle this by deferring the output of .set
+ directives to the end of the compilation unit. */
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
diff --git a/contrib/gcc/config/rs6000/aix43.h b/contrib/gcc/config/rs6000/aix43.h
index a76e694c1ee7..50bd304dd7ca 100644
--- a/contrib/gcc/config/rs6000/aix43.h
+++ b/contrib/gcc/config/rs6000/aix43.h
@@ -187,3 +187,8 @@ do { \
#undef LD_INIT_SWITCH
#define LD_INIT_SWITCH "-binitfini"
+
+/* The IBM AIX 4.x assembler doesn't support forward references in
+ .set directives. We handle this by deferring the output of .set
+ directives to the end of the compilation unit. */
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
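One construct that runs into the forward-reference limit is an alias, emitted as a .set, whose target is defined later in the file (illustration; this assumes the alias is rendered via .set on this target, and the deferral buffers the directive until end of file):

void real_handler (void);
void handler (void) __attribute__ ((alias ("real_handler")));

void
real_handler (void)
{
}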
diff --git a/contrib/gcc/config/rs6000/aix52.h b/contrib/gcc/config/rs6000/aix52.h
index c06665066b31..6f12619e2da6 100644
--- a/contrib/gcc/config/rs6000/aix52.h
+++ b/contrib/gcc/config/rs6000/aix52.h
@@ -193,3 +193,7 @@ do { \
#undef TARGET_C99_FUNCTIONS
#define TARGET_C99_FUNCTIONS 1
+#ifndef _AIX52
+extern long long int atoll(const char *);
+#endif
+
diff --git a/contrib/gcc/config/rs6000/altivec.h b/contrib/gcc/config/rs6000/altivec.h
index 04d120dd901d..779b4280ba50 100644
--- a/contrib/gcc/config/rs6000/altivec.h
+++ b/contrib/gcc/config/rs6000/altivec.h
@@ -32,46 +32,29 @@
#ifndef _ALTIVEC_H
#define _ALTIVEC_H 1
-/* Required by Motorola specs. */
-#define __VEC__ 10206
-
-#ifndef __ALTIVEC__
-#define __ALTIVEC__ 1
+#if !defined(__VEC__) || !defined(__ALTIVEC__)
+#error Use the "-maltivec" flag to enable PowerPC AltiVec support
#endif
-#define __vector __attribute__((vector_size(16)))
+/* If __APPLE_ALTIVEC__ is defined, the compiler supports 'vector',
+ 'pixel' and 'bool' as context-sensitive AltiVec keywords (in
+ non-AltiVec contexts, they revert to their original meanings,
+ if any), so we do not need to define them as macros. */
-/* You are allowed to undef this for C++ compatibility. */
+#if !defined(__APPLE_ALTIVEC__)
+/* You are allowed to undef these for C++ compatibility. */
#define vector __vector
+#define pixel __pixel
+#define bool __bool
+#endif
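The difference the __APPLE_ALTIVEC__ branch preserves: with context-sensitive keywords, 'bool' and 'pixel' keep their ordinary meanings outside vector declarations, which the plain macro fallback above cannot do. A C++ sketch, assuming -maltivec on such a compiler:

#include <altivec.h>

__vector signed int vsi;   /* always-available spelling          */
vector bool char vbc;      /* 'bool' inside a vector context     */
bool done = false;         /* ...and ordinary C++ bool elsewhere */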
-#define bool signed
-#define pixel unsigned short
-#define __pixel unsigned short
-
-/* Dummy prototype. */
-extern int __altivec_link_error_invalid_argument ();
-
-/* Helper macros. */
+/* Condition register codes for AltiVec predicates. */
#define __CR6_EQ 0
#define __CR6_EQ_REV 1
#define __CR6_LT 2
#define __CR6_LT_REV 3
-#define __bin_args_eq(xtype, x, ytype, y) \
- (__builtin_types_compatible_p (xtype, typeof (x)) \
- && __builtin_types_compatible_p (ytype, typeof (y)))
-
-#define __un_args_eq(xtype, x) \
- __builtin_types_compatible_p (xtype, typeof (x))
-
-#define __tern_args_eq(xtype, x, ytype, y, ztype, z) \
- (__builtin_types_compatible_p (xtype, typeof (x)) \
- && __builtin_types_compatible_p (ytype, typeof (y)) \
- && __builtin_types_compatible_p (ztype, typeof (z)))
-
-#define __ch(x, y, z) __builtin_choose_expr (x, y, z)
-
/* These are easy... Same exact arguments. */
#define vec_vaddcuw vec_addc
@@ -122,1077 +105,1434 @@ extern "C++" {
/* Prototypes for builtins that take literals and must always be
inlined. */
-inline vector float vec_ctf (vector unsigned int, const char) __attribute__ ((always_inline));
-inline vector float vec_ctf (vector signed int, const char) __attribute__ ((always_inline));
-inline vector float vec_vcfsx (vector signed int a1, const char a2) __attribute__ ((always_inline));
-inline vector float vec_vcfux (vector unsigned int a1, const char a2) __attribute__ ((always_inline));
-inline vector signed int vec_cts (vector float, const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_ctu (vector float, const char) __attribute__ ((always_inline));
-inline void vec_dss (const char) __attribute__ ((always_inline));
-
-inline void vec_dst (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dst (float *, int, const char) __attribute__ ((always_inline));
-
-inline void vec_dstst (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstst (float *, int, const char) __attribute__ ((always_inline));
-
-inline void vec_dststt (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dststt (float *, int, const char) __attribute__ ((always_inline));
-
-inline void vec_dstt (vector unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (vector float *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed char *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed short *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed int *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (unsigned long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (signed long *, int, const char) __attribute__ ((always_inline));
-inline void vec_dstt (float *, int, const char) __attribute__ ((always_inline));
-
-inline vector float vec_sld (vector float, vector float, const char) __attribute__ ((always_inline));
-inline vector signed int vec_sld (vector signed int, vector signed int, const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_sld (vector unsigned int, vector unsigned int, const char) __attribute__ ((always_inline));
-inline vector signed short vec_sld (vector signed short, vector signed short, const char) __attribute__ ((always_inline));
-inline vector unsigned short vec_sld (vector unsigned short, vector unsigned short, const char) __attribute__ ((always_inline));
-inline vector signed char vec_sld (vector signed char, vector signed char, const char) __attribute__ ((always_inline));
-inline vector unsigned char vec_sld (vector unsigned char, vector unsigned char, const char) __attribute__ ((always_inline));
-inline vector signed char vec_splat (vector signed char, const char) __attribute__ ((always_inline));
-inline vector unsigned char vec_splat (vector unsigned char, const char) __attribute__ ((always_inline));
-inline vector signed short vec_splat (vector signed short, const char) __attribute__ ((always_inline));
-inline vector unsigned short vec_splat (vector unsigned short, const char) __attribute__ ((always_inline));
-inline vector float vec_splat (vector float, const char) __attribute__ ((always_inline));
-inline vector signed int vec_splat (vector signed int, const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_splat (vector unsigned int, const char) __attribute__ ((always_inline));
-inline vector signed char vec_splat_s8 (const char) __attribute__ ((always_inline));
-inline vector signed short vec_splat_s16 (const char) __attribute__ ((always_inline));
-inline vector signed int vec_splat_s32 (const char) __attribute__ ((always_inline));
-inline vector unsigned char vec_splat_u8 (const char) __attribute__ ((always_inline));
-inline vector unsigned short vec_splat_u16 (const char) __attribute__ ((always_inline));
-inline vector unsigned int vec_splat_u32 (const char) __attribute__ ((always_inline));
-inline vector float vec_vspltw (vector float a1, const char a2) __attribute__ ((always_inline));
-inline vector signed int vec_vspltw (vector signed int a1, const char a2) __attribute__ ((always_inline));
-inline vector unsigned int vec_vspltw (vector unsigned int a1, const char a2) __attribute__ ((always_inline));
-inline vector signed short vec_vsplth (vector signed short a1, const char a2) __attribute__ ((always_inline));
-inline vector unsigned short vec_vsplth (vector unsigned short a1, const char a2) __attribute__ ((always_inline));
-inline vector signed char vec_vspltb (vector signed char a1, const char a2) __attribute__ ((always_inline));
-inline vector unsigned char vec_vspltb (vector unsigned char a1, const char a2) __attribute__ ((always_inline));
+inline __vector float vec_ctf (__vector unsigned int, const int) __attribute__ ((always_inline));
+inline __vector float vec_ctf (__vector signed int, const int) __attribute__ ((always_inline));
+inline __vector float vec_vcfsx (__vector signed int a1, const int a2) __attribute__ ((always_inline));
+inline __vector float vec_vcfux (__vector unsigned int a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed int vec_cts (__vector float, const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_ctu (__vector float, const int) __attribute__ ((always_inline));
+inline void vec_dss (const int) __attribute__ ((always_inline));
+
+inline void vec_dst (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const float *, int, const int) __attribute__ ((always_inline));
+
+inline void vec_dstst (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const float *, int, const int) __attribute__ ((always_inline));
+
+inline void vec_dststt (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const float *, int, const int) __attribute__ ((always_inline));
+
+inline void vec_dstt (const __vector unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __bool char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector signed short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __bool short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __pixel *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector signed int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector __bool int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const __vector float *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const signed char *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const short *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const long *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const float *, int, const int) __attribute__ ((always_inline));
+
+inline __vector float vec_sld (__vector float, __vector float, const int) __attribute__ ((always_inline));
+inline __vector signed int vec_sld (__vector signed int, __vector signed int, const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_sld (__vector unsigned int, __vector unsigned int, const int) __attribute__ ((always_inline));
+inline __vector __bool int vec_sld (__vector __bool int, __vector __bool int, const int) __attribute__ ((always_inline));
+inline __vector signed short vec_sld (__vector signed short, __vector signed short, const int) __attribute__ ((always_inline));
+inline __vector unsigned short vec_sld (__vector unsigned short, __vector unsigned short, const int) __attribute__ ((always_inline));
+inline __vector __bool short vec_sld (__vector __bool short, __vector __bool short, const int) __attribute__ ((always_inline));
+inline __vector __pixel vec_sld (__vector __pixel, __vector __pixel, const int) __attribute__ ((always_inline));
+inline __vector signed char vec_sld (__vector signed char, __vector signed char, const int) __attribute__ ((always_inline));
+inline __vector unsigned char vec_sld (__vector unsigned char, __vector unsigned char, const int) __attribute__ ((always_inline));
+inline __vector __bool char vec_sld (__vector __bool char, __vector __bool char, const int) __attribute__ ((always_inline));
+inline __vector signed char vec_splat (__vector signed char, const int) __attribute__ ((always_inline));
+inline __vector unsigned char vec_splat (__vector unsigned char, const int) __attribute__ ((always_inline));
+inline __vector __bool char vec_splat (__vector __bool char, const int) __attribute__ ((always_inline));
+inline __vector signed short vec_splat (__vector signed short, const int) __attribute__ ((always_inline));
+inline __vector unsigned short vec_splat (__vector unsigned short, const int) __attribute__ ((always_inline));
+inline __vector __bool short vec_splat (__vector __bool short, const int) __attribute__ ((always_inline));
+inline __vector __pixel vec_splat (__vector __pixel, const int) __attribute__ ((always_inline));
+inline __vector float vec_splat (__vector float, const int) __attribute__ ((always_inline));
+inline __vector signed int vec_splat (__vector signed int, const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_splat (__vector unsigned int, const int) __attribute__ ((always_inline));
+inline __vector __bool int vec_splat (__vector __bool int, const int) __attribute__ ((always_inline));
+inline __vector signed char vec_splat_s8 (const int) __attribute__ ((always_inline));
+inline __vector signed short vec_splat_s16 (const int) __attribute__ ((always_inline));
+inline __vector signed int vec_splat_s32 (const int) __attribute__ ((always_inline));
+inline __vector unsigned char vec_splat_u8 (const int) __attribute__ ((always_inline));
+inline __vector unsigned short vec_splat_u16 (const int) __attribute__ ((always_inline));
+inline __vector unsigned int vec_splat_u32 (const int) __attribute__ ((always_inline));
+inline __vector float vec_vspltw (__vector float a1, const int a2) __attribute__ ((always_inline));
+inline __vector __bool int vec_vspltw (__vector __bool int a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed int vec_vspltw (__vector signed int a1, const int a2) __attribute__ ((always_inline));
+inline __vector unsigned int vec_vspltw (__vector unsigned int a1, const int a2) __attribute__ ((always_inline));
+inline __vector __bool short vec_vsplth (__vector __bool short a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed short vec_vsplth (__vector signed short a1, const int a2) __attribute__ ((always_inline));
+inline __vector unsigned short vec_vsplth (__vector unsigned short a1, const int a2) __attribute__ ((always_inline));
+inline __vector __pixel vec_vsplth (__vector __pixel a1, const int a2) __attribute__ ((always_inline));
+inline __vector __bool char vec_vspltb (__vector __bool char a1, const int a2) __attribute__ ((always_inline));
+inline __vector signed char vec_vspltb (__vector signed char a1, const int a2) __attribute__ ((always_inline));
+inline __vector unsigned char vec_vspltb (__vector unsigned char a1, const int a2) __attribute__ ((always_inline));
+
+/* vec_step */
+
+template<typename _Tp>
+struct __vec_step_help
+{
+ // All proper __vector types will specialize _S_elem.
+};
+
+template<>
+struct __vec_step_help<__vector signed short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector unsigned short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector __bool short>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector __pixel>
+{
+ static const int _S_elem = 8;
+};
+
+template<>
+struct __vec_step_help<__vector signed int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<__vector unsigned int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<__vector __bool int>
+{
+ static const int _S_elem = 4;
+};
+
+template<>
+struct __vec_step_help<__vector unsigned char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<__vector signed char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<__vector __bool char>
+{
+ static const int _S_elem = 16;
+};
+
+template<>
+struct __vec_step_help<__vector float>
+{
+ static const int _S_elem = 4;
+};
+
+#define vec_step(t) __vec_step_help<typeof(t)>::_S_elem
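+
+/* Usage sketch (illustrative): vec_step yields the element count of its
+   operand's vector type as a compile-time constant, e.g.
+
+     __vector float vf;
+     int n = vec_step (vf);    (n == 4; 16 for a char vector)
+*/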
/* vec_abs */
-inline vector signed char
-vec_abs (vector signed char a1)
+inline __vector signed char
+vec_abs (__vector signed char a1)
{
return __builtin_altivec_abs_v16qi (a1);
}
-inline vector signed short
-vec_abs (vector signed short a1)
+inline __vector signed short
+vec_abs (__vector signed short a1)
{
return __builtin_altivec_abs_v8hi (a1);
}
-inline vector signed int
-vec_abs (vector signed int a1)
+inline __vector signed int
+vec_abs (__vector signed int a1)
{
return __builtin_altivec_abs_v4si (a1);
}
-inline vector float
-vec_abs (vector float a1)
+inline __vector float
+vec_abs (__vector float a1)
{
return __builtin_altivec_abs_v4sf (a1);
}
/* vec_abss */
-inline vector signed char
-vec_abss (vector signed char a1)
+inline __vector signed char
+vec_abss (__vector signed char a1)
{
return __builtin_altivec_abss_v16qi (a1);
}
-inline vector signed short
-vec_abss (vector signed short a1)
+inline __vector signed short
+vec_abss (__vector signed short a1)
{
return __builtin_altivec_abss_v8hi (a1);
}
-inline vector signed int
-vec_abss (vector signed int a1)
+inline __vector signed int
+vec_abss (__vector signed int a1)
{
return __builtin_altivec_abss_v4si (a1);
}
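+
+/* Unlike vec_abs, vec_abss saturates: the most negative element value
+   (e.g. -128 for signed char) yields the type's maximum (+127) instead
+   of wrapping back to itself.  */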
/* vec_add */
-inline vector signed char
-vec_add (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_add (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_add (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_add (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_add (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_add (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_add (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed short
+vec_add (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_add (vector signed char a1, vector unsigned char a2)
+inline __vector signed short
+vec_add (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_add (vector unsigned char a1, vector signed char a2)
+inline __vector signed short
+vec_add (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_add (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_add (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_add (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_add (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_add (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_add (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_add (vector unsigned short a1, vector signed short a2)
+inline __vector signed int
+vec_add (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_add (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed int
+vec_add (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_add (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_add (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_add (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_add (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_add (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_add (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_add (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_add (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_add (vector float a1, vector float a2)
+inline __vector float
+vec_add (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vaddfp ((__vector float) a1, (__vector float) a2);
}
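+
+/* vec_add performs modular (wrapping) element-wise addition; see
+   vec_adds below for the saturating variant.  */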
/* vec_vaddfp */
-inline vector float
-vec_vaddfp (vector float a1, vector float a2)
+inline __vector float
+vec_vaddfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vaddfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vadduwm */
-inline vector signed int
-vec_vadduwm (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vadduwm (__vector __bool int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduwm (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vadduwm (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduwm (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_vadduwm (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduwm (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vadduwm (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vadduwm (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vadduwm (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vadduwm ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vadduhm */
-inline vector signed short
-vec_vadduhm (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vadduhm (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhm (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vadduhm (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhm (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_vadduhm (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhm (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vadduhm (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned short
+vec_vadduhm (__vector unsigned short a1, __vector __bool short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned short
+vec_vadduhm (__vector unsigned short a1, __vector unsigned short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vadduhm ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vaddubm */
-inline vector signed char
-vec_vaddubm (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vaddubm (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddubm (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddubm (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubm (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubm (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubm (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vaddubm (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubm (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubm (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubm ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_addc */
-inline vector unsigned int
-vec_addc (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_addc (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vaddcuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vaddcuw ((__vector signed int) a1, (__vector signed int) a2);
}
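+
+/* vec_addc yields the carry out of an element-wise unsigned 32-bit add:
+   each result word is 1 if a1[i] + a2[i] overflowed 2^32, else 0, which
+   lets multi-word additions be chained.  */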
/* vec_adds */
-inline vector unsigned char
-vec_adds (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_adds (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_adds (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_adds (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_adds (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_adds (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_adds (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned short
+vec_adds (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_adds (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned short
+vec_adds (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_adds (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_adds (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed char
-vec_adds (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_adds (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_adds (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_adds (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_adds (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_adds (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_adds (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_adds (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_adds (vector signed short a1, vector signed short a2)
+inline __vector unsigned int
+vec_adds (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_adds (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_adds (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_adds (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_adds (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_adds (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_adds (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_adds (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_adds (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vaddsws */
-inline vector signed int
-vec_vaddsws (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vaddsws (__vector __bool int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vaddsws (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vaddsws (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vaddsws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vadduws */
-inline vector unsigned int
-vec_vadduws (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vadduws (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduws (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vadduws (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vadduws (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vadduws (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vadduws ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vaddshs */
-inline vector signed short
-vec_vaddshs (vector signed short a1, vector signed short a2)
+
+inline __vector signed short
+vec_vaddshs (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vaddshs (__vector signed short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vaddshs (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vaddshs ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vadduhs */
-inline vector unsigned short
-vec_vadduhs (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vadduhs (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhs (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vadduhs (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vadduhs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vadduhs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vadduhs ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vaddsbs */
-inline vector signed char
-vec_vaddsbs (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vaddsbs (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddsbs (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vaddsbs (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vaddsbs ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vaddubs */
-inline vector unsigned char
-vec_vaddubs (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubs (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubs (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vaddubs (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vaddubs (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vaddubs (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vaddubs ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_and */
-inline vector float
-vec_and (vector float a1, vector float a2)
+inline __vector float
+vec_and (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_and (vector float a1, vector signed int a2)
+inline __vector float
+vec_and (__vector float a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_and (vector signed int a1, vector float a2)
+inline __vector float
+vec_and (__vector __bool int a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_and (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_and (__vector __bool int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_and (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_and (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_and (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_and (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_and (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_and (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_and (vector signed short a1, vector signed short a2)
+inline __vector unsigned int
+vec_and (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_and (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_and (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_and (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned int
+vec_and (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_and (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_and (__vector __bool short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_and (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_and (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_and (vector signed char a1, vector unsigned char a2)
+inline __vector signed short
+vec_and (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_and (vector unsigned char a1, vector signed char a2)
+inline __vector signed short
+vec_and (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_and (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_and (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned short
+vec_and (__vector unsigned short a1, __vector __bool short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned short
+vec_and (__vector unsigned short a1, __vector unsigned short a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_and (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_and (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_and (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_and (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_and (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_and (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_and (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vand ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_andc */
-inline vector float
-vec_andc (vector float a1, vector float a2)
+inline __vector float
+vec_andc (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_andc (__vector float a1, __vector __bool int a2)
+{
+ return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_andc (__vector __bool int a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_andc (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_andc (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_andc (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_andc (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_andc (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_andc (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_andc (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_andc (vector float a1, vector signed int a2)
+inline __vector __bool short
+vec_andc (__vector __bool short a1, __vector __bool short a2)
{
- return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_andc (vector signed int a1, vector float a2)
+inline __vector signed short
+vec_andc (__vector __bool short a1, __vector signed short a2)
{
- return (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_andc (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_andc (__vector signed short a1, __vector __bool short a2)
{
- return (vector signed int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_andc (vector signed int a1, vector unsigned int a2)
+inline __vector signed short
+vec_andc (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_andc (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned short
+vec_andc (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_andc (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_andc (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_andc (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_andc (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_andc (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_andc (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_andc (vector unsigned short a1, vector signed short a2)
+inline __vector __bool char
+vec_andc (__vector __bool char a1, __vector __bool char a2)
{
- return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_andc (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed char
+vec_andc (__vector signed char a1, __vector __bool char a2)
{
- return (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_andc (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_andc (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_andc (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_andc (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_andc (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_andc (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_andc (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_andc (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vandc ((__vector signed int) a1, (__vector signed int) a2);
}
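+
+/* vec_andc computes a1 & ~a2 element-wise, i.e. it clears in a1 every
+   bit that is set in a2.  */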
/* vec_avg */
-inline vector unsigned char
-vec_avg (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_avg (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_avg (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_avg (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_avg (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_avg (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_avg (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_avg (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_avg (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_avg (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_avg (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_avg (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) a1, (__vector signed int) a2);
}
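+
+/* vec_avg computes the rounded average (a1[i] + a2[i] + 1) >> 1 in a
+   widened intermediate, so the addition cannot overflow.  */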
/* vec_vavgsw */
-inline vector signed int
-vec_vavgsw (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vavgsw (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vavgsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vavguw */
-inline vector unsigned int
-vec_vavguw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vavguw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vavguw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vavgsh */
-inline vector signed short
-vec_vavgsh (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vavgsh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vavgsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vavguh */
-inline vector unsigned short
-vec_vavguh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vavguh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vavguh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vavgsb */
-inline vector signed char
-vec_vavgsb (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vavgsb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vavgsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vavgub */
-inline vector unsigned char
-vec_vavgub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vavgub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vavgub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_ceil */
-inline vector float
-vec_ceil (vector float a1)
+inline __vector float
+vec_ceil (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfip ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfip ((__vector float) a1);
}
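+
+/* vec_ceil rounds each element toward +infinity (vrfip); the companion
+   vrfim, vrfin and vrfiz forms round toward -infinity, to nearest and
+   toward zero.  */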
/* vec_cmpb */
-inline vector signed int
-vec_cmpb (vector float a1, vector float a2)
+inline __vector signed int
+vec_cmpb (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpbfp ((vector float) a1, (vector float) a2);
+ return (__vector signed int) __builtin_altivec_vcmpbfp ((__vector float) a1, (__vector float) a2);
}
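+
+/* vec_cmpb is a bounds test: in each result word the two high-order
+   bits report a1[i] > a2[i] and a1[i] < -a2[i] respectively, so an
+   all-zero result means every element lies within [-a2, a2] (assuming
+   a2 is non-negative).  */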
/* vec_cmpeq */
-inline vector signed char
-vec_cmpeq (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_cmpeq (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_cmpeq (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_cmpeq (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_cmpeq (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_cmpeq (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_cmpeq (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_cmpeq (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_cmpeq (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_cmpeq (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpeq (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_cmpeq (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpeq (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmpeq (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) a1, (__vector float) a2);
}
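+
+/* The compare intrinsics return __bool vectors whose elements are
+   all-ones for true and all-zeros for false, directly usable as masks
+   for vec_sel.  */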
/* vec_vcmpeqfp */
-inline vector signed int
-vec_vcmpeqfp (vector float a1, vector float a2)
+inline __vector __bool int
+vec_vcmpeqfp (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpeqfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpequw */
-inline vector signed int
-vec_vcmpequw (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_vcmpequw (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_vcmpequw (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_vcmpequw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpequw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vcmpequh */
-inline vector signed short
-vec_vcmpequh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vcmpequh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_vcmpequh (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_vcmpequh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpequh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vcmpequb */
-inline vector signed char
-vec_vcmpequb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vcmpequb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_vcmpequb (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_vcmpequb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpequb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_cmpge */
-inline vector signed int
-vec_cmpge (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmpge (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgefp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) a1, (__vector float) a2);
}
/* vec_cmpgt */
-inline vector signed char
-vec_cmpgt (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_cmpgt (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_cmpgt (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_cmpgt (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_cmpgt (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_cmpgt (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_cmpgt (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_cmpgt (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_cmpgt (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_cmpgt (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpgt (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_cmpgt (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_cmpgt (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmpgt (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpgtfp */
-inline vector signed int
-vec_vcmpgtfp (vector float a1, vector float a2)
+inline __vector __bool int
+vec_vcmpgtfp (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vcmpgtsw */
-inline vector signed int
-vec_vcmpgtsw (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_vcmpgtsw (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vcmpgtuw */
-inline vector signed int
-vec_vcmpgtuw (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_vcmpgtuw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vcmpgtsh */
-inline vector signed short
-vec_cmpgtsh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vcmpgtsh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vcmpgtuh */
-inline vector signed short
-vec_vcmpgtuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_vcmpgtuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vcmpgtsb */
-inline vector signed char
-vec_vcmpgtsb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vcmpgtsb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vcmpgtub */
-inline vector signed char
-vec_vcmpgtub (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_vcmpgtub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_cmple */
-inline vector signed int
-vec_cmple (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmple (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgefp ((vector float) a2, (vector float) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgefp ((__vector float) a2, (__vector float) a1);
}
/* vec_cmplt */
-inline vector signed char
-vec_cmplt (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_cmplt (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a2, (vector signed char) a1);
+ return (__vector __bool char) __builtin_altivec_vcmpgtub ((__vector signed char) a2, (__vector signed char) a1);
}
-inline vector signed char
-vec_cmplt (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_cmplt (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a2, (vector signed char) a1);
+ return (__vector __bool char) __builtin_altivec_vcmpgtsb ((__vector signed char) a2, (__vector signed char) a1);
}
-inline vector signed short
-vec_cmplt (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_cmplt (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a2, (vector signed short) a1);
+ return (__vector __bool short) __builtin_altivec_vcmpgtuh ((__vector signed short) a2, (__vector signed short) a1);
}
-inline vector signed short
-vec_cmplt (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_cmplt (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a2, (vector signed short) a1);
+ return (__vector __bool short) __builtin_altivec_vcmpgtsh ((__vector signed short) a2, (__vector signed short) a1);
}
-inline vector signed int
-vec_cmplt (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_cmplt (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a2, (vector signed int) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgtuw ((__vector signed int) a2, (__vector signed int) a1);
}
-inline vector signed int
-vec_cmplt (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_cmplt (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a2, (vector signed int) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgtsw ((__vector signed int) a2, (__vector signed int) a1);
}
-inline vector signed int
-vec_cmplt (vector float a1, vector float a2)
+inline __vector __bool int
+vec_cmplt (__vector float a1, __vector float a2)
{
- return (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a2, (vector float) a1);
+ return (__vector __bool int) __builtin_altivec_vcmpgtfp ((__vector float) a2, (__vector float) a1);
}
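/* Usage sketch (illustrative only, not part of this header or patch): the
   comparison intrinsics above now return AltiVec bool vectors, which pair
   naturally with vec_sel.  Assumes the rest of <altivec.h> and -maltivec.  */
static inline __vector float
example_fmax (__vector float a, __vector float b)
{
  __vector __bool int m = vec_cmpgt (a, b);  /* all-ones where a > b */
  return vec_sel (b, a, m);                  /* take a where m is set */
}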
/* vec_ctf */
-inline vector float
-vec_ctf (vector unsigned int a1, const char a2)
+inline __vector float
+vec_ctf (__vector unsigned int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfux ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfux ((__vector signed int) a1, a2);
}
-inline vector float
-vec_ctf (vector signed int a1, const char a2)
+inline __vector float
+vec_ctf (__vector signed int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfsx ((__vector signed int) a1, a2);
}
/* vec_vcfsx */
-inline vector float
-vec_vcfsx (vector signed int a1, const char a2)
+inline __vector float
+vec_vcfsx (__vector signed int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfsx ((__vector signed int) a1, a2);
}
/* vec_vcfux */
-inline vector float
-vec_vcfux (vector unsigned int a1, const char a2)
+inline __vector float
+vec_vcfux (__vector unsigned int a1, const int a2)
{
- return (vector float) __builtin_altivec_vcfux ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vcfux ((__vector signed int) a1, a2);
}
/* vec_cts */
-inline vector signed int
-vec_cts (vector float a1, const char a2)
+inline __vector signed int
+vec_cts (__vector float a1, const int a2)
{
- return (vector signed int) __builtin_altivec_vctsxs ((vector float) a1, a2);
+ return (__vector signed int) __builtin_altivec_vctsxs ((__vector float) a1, a2);
}
/* vec_ctu */
-inline vector unsigned int
-vec_ctu (vector float a1, const char a2)
+inline __vector unsigned int
+vec_ctu (__vector float a1, const int a2)
{
- return (vector unsigned int) __builtin_altivec_vctuxs ((vector float) a1, a2);
+ return (__vector unsigned int) __builtin_altivec_vctuxs ((__vector float) a1, a2);
}
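/* Usage sketch (illustrative only): vec_ctf/vec_cts/vec_ctu now declare the
   scale operand as const int rather than const char; it must still be a
   literal in the range 0-31.  Converting Q16 fixed point to float and back:  */
static inline __vector float
example_q16_to_float (__vector signed int q)
{
  return vec_ctf (q, 16);    /* scale each element by 2**-16 */
}
static inline __vector signed int
example_float_to_q16 (__vector float f)
{
  return vec_cts (f, 16);    /* scale by 2**16, convert with saturation */
}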
/* vec_dss */
inline void
-vec_dss (const char a1)
+vec_dss (const int a1)
{
__builtin_altivec_dss (a1);
}
@@ -1208,97 +1548,121 @@ vec_dssall (void)
/* vec_dst */
inline void
-vec_dst (vector unsigned char *a1, int a2, const char a3)
+vec_dst (const __vector unsigned char *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const __vector signed char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector signed char *a1, int a2, const char a3)
+vec_dst (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector unsigned short *a1, int a2, const char a3)
+vec_dst (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector signed short *a1, int a2, const char a3)
+vec_dst (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector unsigned int *a1, int a2, const char a3)
+vec_dst (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector signed int *a1, int a2, const char a3)
+vec_dst (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (vector float *a1, int a2, const char a3)
+vec_dst (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned char *a1, int a2, const char a3)
+vec_dst (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed char *a1, int a2, const char a3)
+vec_dst (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned short *a1, int a2, const char a3)
+vec_dst (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed short *a1, int a2, const char a3)
+vec_dst (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned int *a1, int a2, const char a3)
+vec_dst (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed int *a1, int a2, const char a3)
+vec_dst (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (unsigned long *a1, int a2, const char a3)
+vec_dst (const short *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (signed long *a1, int a2, const char a3)
+vec_dst (const unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
inline void
-vec_dst (float *a1, int a2, const char a3)
+vec_dst (const int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dst (const float *a1, int a2, const int a3)
{
__builtin_altivec_dst ((void *) a1, a2, a3);
}
@@ -1306,97 +1670,121 @@ vec_dst (float *a1, int a2, const char a3)
/* vec_dstst */
inline void
-vec_dstst (vector unsigned char *a1, int a2, const char a3)
+vec_dstst (const __vector unsigned char *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const __vector signed char *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector signed char *a1, int a2, const char a3)
+vec_dstst (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector unsigned short *a1, int a2, const char a3)
+vec_dstst (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector signed short *a1, int a2, const char a3)
+vec_dstst (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector unsigned int *a1, int a2, const char a3)
+vec_dstst (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector signed int *a1, int a2, const char a3)
+vec_dstst (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (vector float *a1, int a2, const char a3)
+vec_dstst (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned char *a1, int a2, const char a3)
+vec_dstst (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed char *a1, int a2, const char a3)
+vec_dstst (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned short *a1, int a2, const char a3)
+vec_dstst (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed short *a1, int a2, const char a3)
+vec_dstst (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned int *a1, int a2, const char a3)
+vec_dstst (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed int *a1, int a2, const char a3)
+vec_dstst (const short *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (unsigned long *a1, int a2, const char a3)
+vec_dstst (const unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (signed long *a1, int a2, const char a3)
+vec_dstst (const int *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
inline void
-vec_dstst (float *a1, int a2, const char a3)
+vec_dstst (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstst ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstst (const float *a1, int a2, const int a3)
{
__builtin_altivec_dstst ((void *) a1, a2, a3);
}
@@ -1404,97 +1792,121 @@ vec_dstst (float *a1, int a2, const char a3)
/* vec_dststt */
inline void
-vec_dststt (vector unsigned char *a1, int a2, const char a3)
+vec_dststt (const __vector unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector signed char *a1, int a2, const char a3)
+vec_dststt (const __vector signed char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector unsigned short *a1, int a2, const char a3)
+vec_dststt (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector signed short *a1, int a2, const char a3)
+vec_dststt (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector unsigned int *a1, int a2, const char a3)
+vec_dststt (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector signed int *a1, int a2, const char a3)
+vec_dststt (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (vector float *a1, int a2, const char a3)
+vec_dststt (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned char *a1, int a2, const char a3)
+vec_dststt (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed char *a1, int a2, const char a3)
+vec_dststt (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned short *a1, int a2, const char a3)
+vec_dststt (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed short *a1, int a2, const char a3)
+vec_dststt (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned int *a1, int a2, const char a3)
+vec_dststt (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed int *a1, int a2, const char a3)
+vec_dststt (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (unsigned long *a1, int a2, const char a3)
+vec_dststt (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (signed long *a1, int a2, const char a3)
+vec_dststt (const short *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
inline void
-vec_dststt (float *a1, int a2, const char a3)
+vec_dststt (const unsigned int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dststt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dststt (const float *a1, int a2, const int a3)
{
__builtin_altivec_dststt ((void *) a1, a2, a3);
}
@@ -1502,3561 +1914,4931 @@ vec_dststt (float *a1, int a2, const char a3)
/* vec_dstt */
inline void
-vec_dstt (vector unsigned char *a1, int a2, const char a3)
+vec_dstt (const __vector unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector signed char *a1, int a2, const char a3)
+vec_dstt (const __vector signed char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector unsigned short *a1, int a2, const char a3)
+vec_dstt (const __vector __bool char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector signed short *a1, int a2, const char a3)
+vec_dstt (const __vector unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector unsigned int *a1, int a2, const char a3)
+vec_dstt (const __vector signed short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector signed int *a1, int a2, const char a3)
+vec_dstt (const __vector __bool short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (vector float *a1, int a2, const char a3)
+vec_dstt (const __vector __pixel *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned char *a1, int a2, const char a3)
+vec_dstt (const __vector unsigned int *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed char *a1, int a2, const char a3)
+vec_dstt (const __vector signed int *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned short *a1, int a2, const char a3)
+vec_dstt (const __vector __bool int *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed short *a1, int a2, const char a3)
+vec_dstt (const __vector float *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned int *a1, int a2, const char a3)
+vec_dstt (const unsigned char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed int *a1, int a2, const char a3)
+vec_dstt (const signed char *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (unsigned long *a1, int a2, const char a3)
+vec_dstt (const unsigned short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (signed long *a1, int a2, const char a3)
+vec_dstt (const short *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
inline void
-vec_dstt (float *a1, int a2, const char a3)
+vec_dstt (const unsigned int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const int *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const unsigned long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const long *a1, int a2, const int a3)
+{
+ __builtin_altivec_dstt ((void *) a1, a2, a3);
+}
+
+inline void
+vec_dstt (const float *a1, int a2, const int a3)
{
__builtin_altivec_dstt ((void *) a1, a2, a3);
}
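/* Usage sketch (illustrative only): the data-stream prefetch intrinsics now
   take const-qualified pointers and a const int stream tag (a literal 0-3).
   The control-word packing below (size << 24 | count << 16 | stride) follows
   the usual AltiVec convention but should be treated as an assumption.  */
static inline void
example_prefetch (const float *p)
{
  const int ctl = (2 << 24) | (4 << 16) | 64;  /* 4 blocks of 2 vectors, 64-byte stride */
  vec_dst (p, ctl, 0);                         /* start data stream 0 at p */
}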
/* vec_expte */
-inline vector float
-vec_expte (vector float a1)
+inline __vector float
+vec_expte (__vector float a1)
{
- return (vector float) __builtin_altivec_vexptefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vexptefp ((__vector float) a1);
}
/* vec_floor */
-inline vector float
-vec_floor (vector float a1)
+inline __vector float
+vec_floor (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfim ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfim ((__vector float) a1);
}
/* vec_ld */
-inline vector float
-vec_ld (int a1, vector float *a2)
+inline __vector float
+vec_ld (int a1, const __vector float *a2)
+{
+ return (__vector float) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector float
+vec_ld (int a1, const float *a2)
+{
+ return (__vector float) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector __bool int
+vec_ld (int a1, const __vector __bool int *a2)
{
- return (vector float) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __bool int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector float
-vec_ld (int a1, float *a2)
+inline __vector signed int
+vec_ld (int a1, const __vector signed int *a2)
{
- return (vector float) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed int
-vec_ld (int a1, vector signed int *a2)
+inline __vector signed int
+vec_ld (int a1, const int *a2)
{
- return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed int
-vec_ld (int a1, signed int *a2)
+inline __vector signed int
+vec_ld (int a1, const long *a2)
{
- return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed int
-vec_ld (int a1, signed long *a2)
+inline __vector unsigned int
+vec_ld (int a1, const __vector unsigned int *a2)
{
- return (vector signed int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ld (int a1, vector unsigned int *a2)
+inline __vector unsigned int
+vec_ld (int a1, const unsigned int *a2)
{
- return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ld (int a1, unsigned int *a2)
+inline __vector unsigned int
+vec_ld (int a1, const unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ld (int a1, unsigned long *a2)
+inline __vector __bool short
+vec_ld (int a1, const __vector __bool short *a2)
{
- return (vector unsigned int) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __bool short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed short
-vec_ld (int a1, vector signed short *a2)
+inline __vector __pixel
+vec_ld (int a1, const __vector __pixel *a2)
{
- return (vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __pixel) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed short
-vec_ld (int a1, signed short *a2)
+inline __vector signed short
+vec_ld (int a1, const __vector signed short *a2)
{
- return (vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ld (int a1, vector unsigned short *a2)
+inline __vector signed short
+vec_ld (int a1, const short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ld (int a1, unsigned short *a2)
+inline __vector unsigned short
+vec_ld (int a1, const __vector unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed char
-vec_ld (int a1, vector signed char *a2)
+inline __vector unsigned short
+vec_ld (int a1, const unsigned short *a2)
{
- return (vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector signed char
-vec_ld (int a1, signed char *a2)
+inline __vector __bool char
+vec_ld (int a1, const __vector __bool char *a2)
{
- return (vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector __bool char) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ld (int a1, vector unsigned char *a2)
+inline __vector signed char
+vec_ld (int a1, const __vector signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ld (int a1, unsigned char *a2)
+inline __vector signed char
+vec_ld (int a1, const signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector unsigned char
+vec_ld (int a1, const __vector unsigned char *a2)
+{
+ return (__vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
+}
+
+inline __vector unsigned char
+vec_ld (int a1, const unsigned char *a2)
+{
+ return (__vector unsigned char) __builtin_altivec_lvx (a1, (void *) a2);
}
/* vec_lde */
-inline vector signed char
-vec_lde (int a1, signed char *a2)
+inline __vector signed char
+vec_lde (int a1, const signed char *a2)
{
- return (vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lde (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_lde (int a1, const unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
}
-inline vector signed short
-vec_lde (int a1, signed short *a2)
+inline __vector signed short
+vec_lde (int a1, const short *a2)
{
- return (vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
}
-inline vector unsigned short
-vec_lde (int a1, unsigned short *a2)
+inline __vector unsigned short
+vec_lde (int a1, const unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
}
-inline vector float
-vec_lde (int a1, float *a2)
+inline __vector float
+vec_lde (int a1, const float *a2)
{
- return (vector float) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector float) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lde (int a1, signed int *a2)
+inline __vector signed int
+vec_lde (int a1, const int *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_lde (int a1, unsigned int *a2)
+inline __vector unsigned int
+vec_lde (int a1, const unsigned int *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lde (int a1, signed long *a2)
+inline __vector signed int
+vec_lde (int a1, const long *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
-vec_lde (int a1, unsigned long *a2)
+inline __vector unsigned int
+vec_lde (int a1, const unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
/* vec_lvewx */
-inline vector float
+inline __vector float
vec_lvewx (int a1, float *a2)
{
- return (vector float) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector float) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lvewx (int a1, signed int *a2)
+inline __vector signed int
+vec_lvewx (int a1, int *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
+inline __vector unsigned int
vec_lvewx (int a1, unsigned int *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector signed int
-vec_lvewx (int a1, signed long *a2)
+inline __vector signed int
+vec_lvewx (int a1, long *a2)
{
- return (vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvewx (a1, (void *) a2);
}
-inline vector unsigned int
+inline __vector unsigned int
vec_lvewx (int a1, unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvewx (a1, (void *) a2);
}
/* vec_lvehx */
-inline vector signed short
-vec_lvehx (int a1, signed short *a2)
+inline __vector signed short
+vec_lvehx (int a1, short *a2)
{
- return (vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvehx (a1, (void *) a2);
}
-inline vector unsigned short
+inline __vector unsigned short
vec_lvehx (int a1, unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvehx (a1, (void *) a2);
}
/* vec_lvebx */
-inline vector signed char
+inline __vector signed char
vec_lvebx (int a1, signed char *a2)
{
- return (vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvebx (a1, (void *) a2);
}
-inline vector unsigned char
+inline __vector unsigned char
vec_lvebx (int a1, unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvebx (a1, (void *) a2);
}
/* vec_ldl */
-inline vector float
-vec_ldl (int a1, vector float *a2)
+inline __vector float
+vec_ldl (int a1, const __vector float *a2)
+{
+ return (__vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector float
+vec_ldl (int a1, const float *a2)
+{
+ return (__vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector __bool int
+vec_ldl (int a1, const __vector __bool int *a2)
+{
+ return (__vector __bool int) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector signed int
+vec_ldl (int a1, const __vector signed int *a2)
{
- return (vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector float
-vec_ldl (int a1, float *a2)
+inline __vector signed int
+vec_ldl (int a1, const int *a2)
{
- return (vector float) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed int
-vec_ldl (int a1, vector signed int *a2)
+inline __vector signed int
+vec_ldl (int a1, const long *a2)
{
- return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed int
-vec_ldl (int a1, signed int *a2)
+inline __vector unsigned int
+vec_ldl (int a1, const __vector unsigned int *a2)
{
- return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed int
-vec_ldl (int a1, signed long *a2)
+inline __vector unsigned int
+vec_ldl (int a1, const unsigned int *a2)
{
- return (vector signed int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ldl (int a1, vector unsigned int *a2)
+inline __vector unsigned int
+vec_ldl (int a1, const unsigned long *a2)
{
- return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ldl (int a1, unsigned int *a2)
+inline __vector __bool short
+vec_ldl (int a1, const __vector __bool short *a2)
{
- return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector __bool short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned int
-vec_ldl (int a1, unsigned long *a2)
+inline __vector __pixel
+vec_ldl (int a1, const __vector __pixel *a2)
{
- return (vector unsigned int) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector __pixel) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed short
-vec_ldl (int a1, vector signed short *a2)
+inline __vector signed short
+vec_ldl (int a1, const __vector signed short *a2)
{
- return (vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed short
-vec_ldl (int a1, signed short *a2)
+inline __vector signed short
+vec_ldl (int a1, const short *a2)
{
- return (vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ldl (int a1, vector unsigned short *a2)
+inline __vector unsigned short
+vec_ldl (int a1, const __vector unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned short
-vec_ldl (int a1, unsigned short *a2)
+inline __vector unsigned short
+vec_ldl (int a1, const unsigned short *a2)
{
- return (vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned short) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed char
-vec_ldl (int a1, vector signed char *a2)
+inline __vector __bool char
+vec_ldl (int a1, const __vector __bool char *a2)
{
- return (vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector __bool char) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector signed char
-vec_ldl (int a1, signed char *a2)
+inline __vector signed char
+vec_ldl (int a1, const __vector signed char *a2)
{
- return (vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ldl (int a1, vector unsigned char *a2)
+inline __vector signed char
+vec_ldl (int a1, const signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector signed char) __builtin_altivec_lvxl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_ldl (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_ldl (int a1, const __vector unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
+}
+
+inline __vector unsigned char
+vec_ldl (int a1, const unsigned char *a2)
+{
+ return (__vector unsigned char) __builtin_altivec_lvxl (a1, (void *) a2);
}
/* vec_loge */
-inline vector float
-vec_loge (vector float a1)
+inline __vector float
+vec_loge (__vector float a1)
{
- return (vector float) __builtin_altivec_vlogefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vlogefp ((__vector float) a1);
}
/* vec_lvsl */
-inline vector unsigned char
-vec_lvsl (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed char *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, unsigned short *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed short *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, unsigned int *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed int *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, unsigned long *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile unsigned long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, signed long *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsl (int a1, float *a2)
+inline __vector unsigned char
+vec_lvsl (int a1, const volatile float *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsl (a1, (void *) a2);
}
/* vec_lvsr */
-inline vector unsigned char
-vec_lvsr (int a1, unsigned char *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed char *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile signed char *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, unsigned short *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed short *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile short *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, unsigned int *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed int *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile int *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, unsigned long *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile unsigned long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, signed long *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile long *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
-inline vector unsigned char
-vec_lvsr (int a1, float *a2)
+inline __vector unsigned char
+vec_lvsr (int a1, const volatile float *a2)
{
- return (vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
+ return (__vector unsigned char) __builtin_altivec_lvsr (a1, (void *) a2);
}
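/* Usage sketch (illustrative only): vec_lvsl/vec_lvsr now accept
   const volatile pointers.  The classic misaligned-load idiom combines
   vec_lvsl with two aligned vec_ld blocks and vec_perm (defined later
   in this header):  */
static inline __vector unsigned char
example_load_unaligned (const unsigned char *p)
{
  __vector unsigned char hi = vec_ld (0, p);    /* block containing p */
  __vector unsigned char lo = vec_ld (15, p);   /* block containing p + 15 */
  return vec_perm (hi, lo, vec_lvsl (0, p));    /* rotate bytes into place */
}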
/* vec_madd */
-inline vector float
-vec_madd (vector float a1, vector float a2, vector float a3)
+inline __vector float
+vec_madd (__vector float a1, __vector float a2, __vector float a3)
{
- return (vector float) __builtin_altivec_vmaddfp ((vector float) a1, (vector float) a2, (vector float) a3);
+ return (__vector float) __builtin_altivec_vmaddfp ((__vector float) a1, (__vector float) a2, (__vector float) a3);
}
-
/* vec_madds */
-inline vector signed short
-vec_madds (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_madds (__vector signed short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmhaddshs ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmhaddshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
/* vec_max */
-inline vector unsigned char
-vec_max (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_max (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_max (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_max (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_max (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_max (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_max (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_max (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_max (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_max (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_max (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_max (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_max (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_max (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_max (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_max (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_max (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_max (vector signed int a1, vector unsigned int a2)
+inline __vector signed short
+vec_max (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_max (vector unsigned int a1, vector signed int a2)
+inline __vector signed short
+vec_max (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_max (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_max (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_max (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_max (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_max (vector float a1, vector float a2)
+inline __vector unsigned int
+vec_max (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_max (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_max (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_max (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_max (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_max (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vmaxfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vmaxfp */
-inline vector float
-vec_vmaxfp (vector float a1, vector float a2)
+inline __vector float
+vec_vmaxfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vmaxfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vmaxsw */
-inline vector signed int
-vec_vmaxsw (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vmaxsw (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vmaxsw (__vector signed int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vmaxsw (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vmaxsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmaxuw */
-inline vector unsigned int
-vec_vmaxuw (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vmaxuw (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmaxuw (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vmaxuw (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmaxuw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vmaxuw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmaxuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmaxsh */
-inline vector signed short
-vec_vmaxsh (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vmaxsh (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmaxsh (__vector signed short a1, __vector __bool short a2)
+{
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmaxsh (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vmaxsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmaxuh */
-inline vector unsigned short
-vec_vmaxuh (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vmaxuh (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmaxuh (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vmaxuh (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmaxuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vmaxuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmaxuh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmaxsb */
-inline vector signed char
-vec_vmaxsb (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vmaxsb (__vector __bool char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vmaxsb (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vmaxsb (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmaxsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vmaxub */
-inline vector unsigned char
-vec_vmaxub (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vmaxub (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmaxub (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vmaxub (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmaxub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vmaxub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmaxub ((__vector signed char) a1, (__vector signed char) a2);
}
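/* Illustrative sketch, not from the header: a signed max against a
   zero splat clamps negative lanes to zero, the vector analogue of
   max(x, 0).  Assumes <altivec.h> and -maltivec; the function name is
   hypothetical.  */
static inline __vector signed short
clamp_negative_s16 (__vector signed short v)
{
  return vec_vmaxsh (v, vec_splat_s16 (0));
}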
/* vec_mergeh */
-inline vector signed char
-vec_mergeh (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_mergeh (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_mergeh (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_mergeh (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector __bool short
+vec_mergeh (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector __pixel
+vec_mergeh (__vector __pixel a1, __vector __pixel a2)
{
- return (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_mergeh (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed short
+vec_mergeh (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_mergeh (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_mergeh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_mergeh (vector unsigned short a1, vector unsigned short a2)
+inline __vector float
+vec_mergeh (__vector float a1, __vector float a2)
{
- return (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector float) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_mergeh (vector float a1, vector float a2)
+inline __vector __bool int
+vec_mergeh (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_mergeh (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_mergeh (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_mergeh (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_mergeh (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrghw */
-inline vector float
-vec_vmrghw (vector float a1, vector float a2)
+inline __vector float
+vec_vmrghw (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_vmrghw (vector signed int a1, vector signed int a2)
+inline __vector __bool int
+vec_vmrghw (__vector __bool int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmrghw (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vmrghw (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vmrghw (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vmrghw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrghh */
-inline vector signed short
-vec_vmrghh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vmrghh (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmrghh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmrghh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vmrghh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector __pixel
+vec_vmrghh (__vector __pixel a1, __vector __pixel a2)
+{
+ return (__vector __pixel) __builtin_altivec_vmrghh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmrghb */
-inline vector signed char
-vec_vmrghb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vmrghb (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vmrghb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmrghb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vmrghb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vmrghb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mergel */
-inline vector signed char
-vec_mergel (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_mergel (__vector __bool char a1, __vector __bool char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_mergel (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_mergel (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector __bool short
+vec_mergel (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector __pixel
+vec_mergel (__vector __pixel a1, __vector __pixel a2)
{
- return (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_mergel (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed short
+vec_mergel (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_mergel (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_mergel (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_mergel (vector unsigned short a1, vector unsigned short a2)
+inline __vector float
+vec_mergel (__vector float a1, __vector float a2)
{
- return (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector float) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_mergel (vector float a1, vector float a2)
+inline __vector __bool int
+vec_mergel (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_mergel (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_mergel (__vector signed int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_mergel (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_mergel (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrglw */
-inline vector float
-vec_vmrglw (vector float a1, vector float a2)
+inline __vector float
+vec_vmrglw (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vmrglw (__vector signed int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_vmrglw (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vmrglw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vmrglw (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool int
+vec_vmrglw (__vector __bool int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vmrglw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vmrglh */
-inline vector signed short
-vec_vmrglh (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_vmrglh (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vmrglh (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector unsigned short
+vec_vmrglh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vmrglh (vector unsigned short a1, vector unsigned short a2)
+inline __vector __pixel
+vec_vmrglh (__vector __pixel a1, __vector __pixel a2)
{
- return (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector __pixel) __builtin_altivec_vmrglh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmrglb */
-inline vector signed char
-vec_vmrglb (vector signed char a1, vector signed char a2)
+inline __vector __bool char
+vec_vmrglb (__vector __bool char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector __bool char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vmrglb (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vmrglb (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_vmrglb (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vmrglb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mfvscr */
-inline vector unsigned short
+inline __vector unsigned short
vec_mfvscr (void)
{
- return (vector unsigned short) __builtin_altivec_mfvscr ();
+ return (__vector unsigned short) __builtin_altivec_mfvscr ();
}
/* vec_min */
-inline vector unsigned char
-vec_min (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_min (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector unsigned char
+vec_min (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_min (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_min (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_min (vector unsigned char a1, vector unsigned char a2)
+inline __vector signed char
+vec_min (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_min (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_min (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_min (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_min (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_min (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_min (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_min (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_min (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_min (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_min (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_min (vector signed int a1, vector unsigned int a2)
+inline __vector signed short
+vec_min (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_min (vector unsigned int a1, vector signed int a2)
+inline __vector signed short
+vec_min (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_min (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_min (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_min (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_min (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_min (vector float a1, vector float a2)
+inline __vector unsigned int
+vec_min (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_min (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_min (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_min (__vector signed int a1, __vector __bool int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_min (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_min (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vminfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vminfp */
-inline vector float
-vec_vminfp (vector float a1, vector float a2)
+inline __vector float
+vec_vminfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vminfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vminsw */
-inline vector signed int
-vec_vminsw (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vminsw (__vector __bool int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vminsw (__vector signed int a1, __vector __bool int a2)
{
- return (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_vminsw (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vminsw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vminuw */
-inline vector unsigned int
-vec_vminuw (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vminuw (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vminuw (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_vminuw (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vminuw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vminuw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vminuw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vminsh */
-inline vector signed short
-vec_vminsh (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vminsh (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vminsh (__vector signed short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vminsh (__vector signed short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vminsh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vminuh */
-inline vector unsigned short
-vec_vminuh (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vminuh (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vminuh (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vminuh (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vminuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vminuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vminuh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vminsb */
-inline vector signed char
-vec_vminsb (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vminsb (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vminsb (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vminsb (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vminsb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vminub */
-inline vector unsigned char
-vec_vminub (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vminub (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vminub (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vminub (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vminub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vminub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vminub ((__vector signed char) a1, (__vector signed char) a2);
}
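/* Illustrative sketch, not from the header: chaining the max and min
   overloads clamps each lane to [lo, hi].  Assumes <altivec.h> and
   -maltivec; the function name is hypothetical.  */
static inline __vector signed short
clamp_s16 (__vector signed short v, __vector signed short lo,
           __vector signed short hi)
{
  return vec_vminsh (vec_vmaxsh (v, lo), hi);
}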
/* vec_mladd */
-inline vector signed short
-vec_mladd (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_mladd (__vector signed short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
-inline vector signed short
-vec_mladd (vector signed short a1, vector unsigned short a2, vector unsigned short a3)
+inline __vector signed short
+vec_mladd (__vector signed short a1, __vector unsigned short a2, __vector unsigned short a3)
{
- return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
-inline vector signed short
-vec_mladd (vector unsigned short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_mladd (__vector unsigned short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
-inline vector unsigned short
-vec_mladd (vector unsigned short a1, vector unsigned short a2, vector unsigned short a3)
+inline __vector unsigned short
+vec_mladd (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned short a3)
{
- return (vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector unsigned short) __builtin_altivec_vmladduhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
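/* Illustrative sketch, not from the header: vec_mladd is a modular
   multiply-add, r[i] = (a[i] * b[i] + c[i]) mod 2^16, which is why the
   same vmladduhm builtin serves both signed and unsigned halfword
   overloads above.  Assumes <altivec.h> and -maltivec; the function
   name is hypothetical.  */
static inline __vector signed short
muladd_mod_s16 (__vector signed short a, __vector signed short b,
                __vector signed short c)
{
  return vec_mladd (a, b, c);
}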
/* vec_mradds */
-inline vector signed short
-vec_mradds (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_mradds (__vector signed short a1, __vector signed short a2, __vector signed short a3)
{
- return (vector signed short) __builtin_altivec_vmhraddshs ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3);
+ return (__vector signed short) __builtin_altivec_vmhraddshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed short) a3);
}
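/* Illustrative sketch, not from the header: vec_mradds is the
   saturating Q15 multiply-round-add,
   r[i] = sat(((a[i] * b[i] + 0x4000) >> 15) + c[i]), a staple of
   fixed-point DSP loops.  Assumes <altivec.h> and -maltivec; the
   function name is hypothetical.  */
static inline __vector signed short
q15_mul_acc (__vector signed short a, __vector signed short b,
             __vector signed short acc)
{
  return vec_mradds (a, b, acc);
}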
/* vec_msum */
-inline vector unsigned int
-vec_msum (vector unsigned char a1, vector unsigned char a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_msum (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_msum (vector signed char a1, vector unsigned char a2, vector signed int a3)
+inline __vector signed int
+vec_msum (__vector signed char a1, __vector unsigned char a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
-inline vector unsigned int
-vec_msum (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_msum (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_msum (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_msum (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumshm */
-inline vector signed int
-vec_vmsumshm (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_vmsumshm (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumuhm */
-inline vector unsigned int
-vec_vmsumuhm (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_vmsumuhm (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhm ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsummbm */
-inline vector signed int
-vec_vmsummbm (vector signed char a1, vector unsigned char a2, vector signed int a3)
+inline __vector signed int
+vec_vmsummbm (__vector signed char a1, __vector unsigned char a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsummbm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
/* vec_vmsumubm */
-inline vector unsigned int
-vec_vmsumubm (vector unsigned char a1, vector unsigned char a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_vmsumubm (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumubm ((__vector signed char) a1, (__vector signed char) a2, (__vector signed int) a3);
}
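/* Illustrative sketch, not from the header: vec_msum folds four byte
   products into each 32-bit lane of the accumulator, so a long dot
   product takes one call per 16 bytes plus a final horizontal add.
   Assumes <altivec.h> and -maltivec; the function name is
   hypothetical.  */
static inline __vector unsigned int
dot_step_u8 (__vector unsigned char a, __vector unsigned char b,
             __vector unsigned int acc)
{
  return vec_msum (a, b, acc);
}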
/* vec_msums */
-inline vector unsigned int
-vec_msums (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_msums (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_msums (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_msums (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumshs */
-inline vector signed int
-vec_vmsumshs (vector signed short a1, vector signed short a2, vector signed int a3)
+inline __vector signed int
+vec_vmsumshs (__vector signed short a1, __vector signed short a2, __vector signed int a3)
{
- return (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vmsumshs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
/* vec_vmsumuhs */
-inline vector unsigned int
-vec_vmsumuhs (vector unsigned short a1, vector unsigned short a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_vmsumuhs (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vmsumuhs ((__vector signed short) a1, (__vector signed short) a2, (__vector signed int) a3);
}
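/* Illustrative sketch, not from the header: the saturating forms
   (vec_msums, vec_vmsumshs, ...) set the VSCR SAT flag when a lane
   clamps, and vec_mfvscr makes that observable.  SAT is the low-order
   bit of the 32-bit VSCR, i.e. halfword element 7 on big-endian
   AltiVec.  Assumes <altivec.h> and -maltivec; the function name is
   hypothetical.  */
static inline int
vscr_saturated (void)
{
  union { __vector unsigned short v; unsigned short e[8]; } u;
  u.v = vec_mfvscr ();
  return u.e[7] & 1;
}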
/* vec_mtvscr */
inline void
-vec_mtvscr (vector signed int a1)
+vec_mtvscr (__vector signed int a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector unsigned int a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector __bool int a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector unsigned int a1)
+vec_mtvscr (__vector signed short a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector signed short a1)
+vec_mtvscr (__vector unsigned short a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector unsigned short a1)
+vec_mtvscr (__vector __bool short a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector signed char a1)
+vec_mtvscr (__vector __pixel a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
inline void
-vec_mtvscr (vector unsigned char a1)
+vec_mtvscr (__vector signed char a1)
{
- __builtin_altivec_mtvscr ((vector signed int) a1);
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector unsigned char a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
+}
+
+inline void
+vec_mtvscr (__vector __bool char a1)
+{
+ __builtin_altivec_mtvscr ((__vector signed int) a1);
}
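/* Illustrative sketch, not from the header: vec_mfvscr and vec_mtvscr
   pair up to save and restore the VSCR around code that toggles
   non-Java mode or must not leave the SAT bit set.  Assumes
   <altivec.h> and -maltivec; the function name is hypothetical.  */
static inline void
run_with_saved_vscr (void (*body) (void))
{
  __vector unsigned short saved = vec_mfvscr ();
  body ();
  vec_mtvscr (saved);
}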
/* vec_mule */
-inline vector unsigned short
-vec_mule (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_mule (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_mule (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_mule (__vector signed char a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vmulesb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned int
-vec_mule (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_mule (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_mule (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_mule (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulesh */
-inline vector signed int
-vec_vmulesh (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_vmulesh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulesh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmuleuh */
-inline vector unsigned int
-vec_vmuleuh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_vmuleuh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmuleuh ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+/* vec_vmulesb */
+
+inline __vector signed short
+vec_vmulesb (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed short) __builtin_altivec_vmulesb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vmuleub */
-inline vector unsigned short
-vec_vmuleub (vector unsigned char a1, vector unsigned char a2)
+
+inline __vector unsigned short
+vec_vmuleub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuleub ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_mulo */
-inline vector unsigned short
-vec_mulo (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_mulo (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_mulo (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_mulo (__vector signed char a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned int
-vec_mulo (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_mulo (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_mulo (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_mulo (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulosh */
-inline vector signed int
-vec_vmulosh (vector signed short a1, vector signed short a2)
+inline __vector signed int
+vec_vmulosh (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed int) __builtin_altivec_vmulosh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulouh */
-inline vector unsigned int
-vec_vmulouh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_vmulouh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned int) __builtin_altivec_vmulouh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vmulosb */
-inline vector signed short
-vec_vmulosb (vector signed char a1, vector signed char a2)
+inline __vector signed short
+vec_vmulosb (__vector signed char a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed short) __builtin_altivec_vmulosb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_vmuloub */
-inline vector unsigned short
-vec_vmuloub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_vmuloub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned short) __builtin_altivec_vmuloub ((__vector signed char) a1, (__vector signed char) a2);
}
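/* Illustrative sketch, not from the header: vec_mule and vec_mulo
   together produce all eight full-precision 16 x 16 -> 32 products of
   two halfword vectors, split across even and odd lanes.  Assumes
   <altivec.h> and -maltivec; the function name is hypothetical.  */
static inline void
full_products_s16 (__vector signed short a, __vector signed short b,
                   __vector signed int *even, __vector signed int *odd)
{
  *even = vec_mule (a, b);   /* products of elements 0, 2, 4, 6 */
  *odd  = vec_mulo (a, b);   /* products of elements 1, 3, 5, 7 */
}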
/* vec_nmsub */
-inline vector float
-vec_nmsub (vector float a1, vector float a2, vector float a3)
+inline __vector float
+vec_nmsub (__vector float a1, __vector float a2, __vector float a3)
{
- return (vector float) __builtin_altivec_vnmsubfp ((vector float) a1, (vector float) a2, (vector float) a3);
+ return (__vector float) __builtin_altivec_vnmsubfp ((__vector float) a1, (__vector float) a2, (__vector float) a3);
}
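/* Illustrative sketch, not from the header: vec_nmsub computes
   -(a * b - c), i.e. c - a*b in one fused operation, which is the
   residual step of a Newton-Raphson reciprocal refinement.  Assumes
   <altivec.h> and -maltivec; the function name is hypothetical.  */
static inline __vector float
recip_residual (__vector float a, __vector float x, __vector float one)
{
  return vec_nmsub (a, x, one);   /* one - a*x */
}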
/* vec_nor */
-inline vector float
-vec_nor (vector float a1, vector float a2)
+inline __vector float
+vec_nor (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_nor (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_nor (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_nor (__vector __bool int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_nor (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_nor (__vector signed short a1, __vector signed short a2)
{
- return (vector signed int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_nor (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_nor (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_nor (vector signed short a1, vector signed short a2)
+inline __vector __bool short
+vec_nor (__vector __bool short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_nor (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed char
+vec_nor (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_nor (vector signed char a1, vector signed char a2)
+inline __vector unsigned char
+vec_nor (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_nor (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_nor (__vector __bool char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vnor ((__vector signed int) a1, (__vector signed int) a2);
}
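/* Illustrative sketch, not from the header: with both operands equal,
   vec_nor degenerates to a bitwise NOT, the usual AltiVec idiom since
   there is no dedicated vnot instruction.  Assumes <altivec.h> and
   -maltivec; the function name is hypothetical.  */
static inline __vector unsigned int
vnot_u32 (__vector unsigned int v)
{
  return vec_nor (v, v);
}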
/* vec_or */
-inline vector float
-vec_or (vector float a1, vector float a2)
+inline __vector float
+vec_or (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_or (__vector float a1, __vector __bool int a2)
+{
+ return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_or (__vector __bool int a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_or (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_or (__vector __bool int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_or (vector float a1, vector signed int a2)
+inline __vector signed int
+vec_or (__vector signed int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_or (vector signed int a1, vector float a2)
+inline __vector signed int
+vec_or (__vector signed int a1, __vector signed int a2)
{
- return (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_or (vector signed int a1, vector signed int a2)
+inline __vector unsigned int
+vec_or (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_or (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_or (__vector unsigned int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_or (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned int
+vec_or (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_or (vector unsigned int a1, vector unsigned int a2)
+inline __vector __bool short
+vec_or (__vector __bool short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_or (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_or (__vector __bool short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_or (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_or (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_or (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_or (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_or (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_or (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_or (vector signed char a1, vector signed char a2)
+inline __vector unsigned short
+vec_or (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector signed char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_or (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_or (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_or (vector unsigned char a1, vector signed char a2)
+inline __vector signed char
+vec_or (__vector __bool char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_or (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool char
+vec_or (__vector __bool char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_or (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_or (__vector signed char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_or (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_or (__vector unsigned char a1, __vector __bool char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_or (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vor ((__vector signed int) a1, (__vector signed int) a2);
}
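
/* Usage sketch, illustrative only and not part of the imported sources: the
   overload set above lets __bool masks combine with element vectors of the
   same width, with the element type winning.  A hypothetical helper, assuming
   a -maltivec compile and <altivec.h>:

     __vector unsigned int
     set_masked_bits (__vector unsigned int v, __vector __bool int m)
     {
       return vec_or (v, m);   // (unsigned int, __bool int) overload above
     }
*/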
/* vec_pack */
-inline vector signed char
-vec_pack (vector signed short a1, vector signed short a2)
+inline __vector signed char
+vec_pack (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_pack (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_pack (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_pack (vector signed int a1, vector signed int a2)
+inline __vector __bool char
+vec_pack (__vector __bool short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_pack (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_pack (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned short
+vec_pack (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool short
+vec_pack (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
}
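
/* Usage sketch, illustrative only: vec_pack is the modulo (truncating) pack.
   It narrows two vectors to one vector of half-width elements, keeping only
   the low half of each element and never saturating; the vec_vpkuhum and
   vec_vpkuwum entries below are the per-width spellings of the same thing.
   Hypothetically:

     __vector signed short
     narrow (__vector signed int a, __vector signed int b)
     {
       return vec_pack (a, b);   // 8 shorts from 8 ints, high halves dropped
     }
*/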
/* vec_vpkuwum */
-inline vector signed short
-vec_vpkuwum (vector signed int a1, vector signed int a2)
+inline __vector __bool short
+vec_vpkuwum (__vector __bool int a1, __vector __bool int a2)
+{
+ return (__vector __bool short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed short
+vec_vpkuwum (__vector signed int a1, __vector signed int a2)
{
- return (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_vpkuwum (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_vpkuwum (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwum ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkuhum */
-inline vector signed char
-vec_vpkuhum (vector signed short a1, vector signed short a2)
+inline __vector __bool char
+vec_vpkuhum (__vector __bool short a1, __vector __bool short a2)
+{
+ return (__vector __bool char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed char
+vec_vpkuhum (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_vpkuhum (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_vpkuhum (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhum ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_packpx */
-inline vector unsigned short
-vec_packpx (vector unsigned int a1, vector unsigned int a2)
+inline __vector __pixel
+vec_packpx (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkpx ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vpkpx ((__vector signed int) a1, (__vector signed int) a2);
}
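
/* Usage sketch, illustrative only: vec_packpx (vpkpx) packs two vectors of
   32-bit pixels into one vector of 16-bit 1/5/5/5 pixels; per the AltiVec
   PIM it keeps the low bit of the first channel and the high five bits of
   each remaining channel.  Note the result is now typed __vector __pixel
   rather than plain unsigned short, e.g. with hypothetical operands:

     __vector __pixel p16 = vec_packpx (argb_hi, argb_lo);
*/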
/* vec_packs */
-inline vector unsigned char
-vec_packs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_packs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed char
-vec_packs (vector signed short a1, vector signed short a2)
+inline __vector signed char
+vec_packs (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_packs (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_packs (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_packs (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_packs (__vector signed int a1, __vector signed int a2)
{
- return (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) a1, (__vector signed int) a2);
}
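
/* Usage sketch, illustrative only: vec_packs is the saturating pack; signed
   inputs clamp to the signed half-width range (vpkshss/vpkswss) and unsigned
   inputs to the unsigned one (vpkuhus/vpkuwus).  Hypothetically:

     __vector signed char
     narrow_sat (__vector signed short a, __vector signed short b)
     {
       return vec_packs (a, b);   // values outside [-128,127] clamp
     }
*/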
/* vec_vpkswss */
-inline vector signed short
-vec_vpkswss (vector signed int a1, vector signed int a2)
+inline __vector signed short
+vec_vpkswss (__vector signed int a1, __vector signed int a2)
{
- return (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vpkswss ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkuwus */
-inline vector unsigned short
-vec_vpkuwus (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_vpkuwus (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkshss */
-inline vector signed char
-vec_vpkshss (vector signed short a1, vector signed short a2)
+inline __vector signed char
+vec_vpkshss (__vector signed short a1, __vector signed short a2)
{
- return (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vpkshss ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vpkuhus */
-inline vector unsigned char
-vec_vpkuhus (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_vpkuhus (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_packsu */
-inline vector unsigned char
-vec_packsu (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned char
+vec_packsu (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkuhus ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned char
-vec_packsu (vector signed short a1, vector signed short a2)
+inline __vector unsigned char
+vec_packsu (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_packsu (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_packsu (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkuwus ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_packsu (vector signed int a1, vector signed int a2)
+inline __vector unsigned short
+vec_packsu (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) a1, (__vector signed int) a2);
}
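
/* Usage sketch, illustrative only: vec_packsu packs with unsigned saturation
   even for signed inputs (vpkshus/vpkswus), so negative values clamp to zero;
   this is the usual way to turn signed intermediate arithmetic back into
   pixel bytes.  A hypothetical helper:

     __vector unsigned char
     to_bytes (__vector signed short a, __vector signed short b)
     {
       return vec_packsu (a, b);   // clamps to [0,255]
     }
*/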
/* vec_vpkswus */
-inline vector unsigned short
-vec_vpkswus (vector signed int a1, vector signed int a2)
+inline __vector unsigned short
+vec_vpkswus (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vpkswus ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vpkshus */
-inline vector unsigned char
-vec_vpkshus (vector signed short a1, vector signed short a2)
+inline __vector unsigned char
+vec_vpkshus (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vpkshus ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_perm */
-inline vector float
-vec_perm (vector float a1, vector float a2, vector unsigned char a3)
+inline __vector float
+vec_perm (__vector float a1, __vector float a2, __vector unsigned char a3)
+{
+ return (__vector float) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector signed int
+vec_perm (__vector signed int a1, __vector signed int a2, __vector unsigned char a3)
+{
+ return (__vector signed int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector unsigned int
+vec_perm (__vector unsigned int a1, __vector unsigned int a2, __vector unsigned char a3)
{
- return (vector float) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector unsigned int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector signed int
-vec_perm (vector signed int a1, vector signed int a2, vector unsigned char a3)
+inline __vector __bool int
+vec_perm (__vector __bool int a1, __vector __bool int a2, __vector unsigned char a3)
{
- return (vector signed int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector __bool int) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector unsigned int
-vec_perm (vector unsigned int a1, vector unsigned int a2, vector unsigned char a3)
+inline __vector signed short
+vec_perm (__vector signed short a1, __vector signed short a2, __vector unsigned char a3)
{
- return (vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector signed short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector signed short
-vec_perm (vector signed short a1, vector signed short a2, vector unsigned char a3)
+inline __vector unsigned short
+vec_perm (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned char a3)
{
- return (vector signed short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector unsigned short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector unsigned short
-vec_perm (vector unsigned short a1, vector unsigned short a2, vector unsigned char a3)
+inline __vector __bool short
+vec_perm (__vector __bool short a1, __vector __bool short a2, __vector unsigned char a3)
{
- return (vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector __bool short) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector signed char
-vec_perm (vector signed char a1, vector signed char a2, vector unsigned char a3)
+inline __vector __pixel
+vec_perm (__vector __pixel a1, __vector __pixel a2, __vector unsigned char a3)
{
- return (vector signed char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector __pixel) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
-inline vector unsigned char
-vec_perm (vector unsigned char a1, vector unsigned char a2, vector unsigned char a3)
+inline __vector signed char
+vec_perm (__vector signed char a1, __vector signed char a2, __vector unsigned char a3)
{
- return (vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3);
+ return (__vector signed char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector unsigned char
+vec_perm (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned char a3)
+{
+ return (__vector unsigned char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
+}
+
+inline __vector __bool char
+vec_perm (__vector __bool char a1, __vector __bool char a2, __vector unsigned char a3)
+{
+ return (__vector __bool char) __builtin_altivec_vperm_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed char) a3);
}
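
/* Usage sketch, illustrative only: vec_perm selects 16 bytes from the 32-byte
   concatenation a1:a2, the low five bits of each byte of a3 naming the source
   byte.  A hypothetical byte reverse (brace initializers per GCC; the PIM
   literal syntax uses parentheses instead):

     static const __vector unsigned char rev =
       { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };

     __vector unsigned char
     byteswap (__vector unsigned char v)
     {
       return vec_perm (v, v, rev);
     }
*/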
/* vec_re */
-inline vector float
-vec_re (vector float a1)
+inline __vector float
+vec_re (__vector float a1)
{
- return (vector float) __builtin_altivec_vrefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrefp ((__vector float) a1);
}
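
/* Usage sketch, illustrative only: vrefp is only an estimate (about 12 bits
   of precision per the AltiVec PIM), so code wanting near-full single
   precision conventionally adds one Newton-Raphson step, hypothetically:

     __vector float
     recip (__vector float x)
     {
       const __vector float one = { 1.0f, 1.0f, 1.0f, 1.0f };
       __vector float y = vec_re (x);
       return vec_madd (y, vec_nmsub (x, y, one), y);   // y + y*(1 - x*y)
     }
*/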
/* vec_rl */
-inline vector signed char
-vec_rl (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_rl (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_rl (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_rl (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_rl (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_rl (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_rl (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_rl (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_rl (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_rl (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_rl (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_rl (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
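
/* Usage sketch, illustrative only: vec_rl rotates each element left by the
   corresponding element of a2, counts taken modulo the element width.
   Hypothetically:

     __vector unsigned int
     rot8 (__vector unsigned int v)
     {
       return vec_rl (v, vec_splat_u32 (8));   // rotate every word by 8
     }
*/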
/* vec_vrlw */
-inline vector signed int
-vec_vrlw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vrlw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vrlw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vrlw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vrlw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vrlh */
-inline vector signed short
-vec_vrlh (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vrlh (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vrlh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vrlh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vrlh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vrlb */
-inline vector signed char
-vec_vrlb (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vrlb (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vrlb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vrlb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vrlb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_round */
-inline vector float
-vec_round (vector float a1)
+inline __vector float
+vec_round (__vector float a1)
{
- return (vector float) __builtin_altivec_vrfin ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrfin ((__vector float) a1);
}
/* vec_rsqrte */
-inline vector float
-vec_rsqrte (vector float a1)
+inline __vector float
+vec_rsqrte (__vector float a1)
{
- return (vector float) __builtin_altivec_vrsqrtefp ((vector float) a1);
+ return (__vector float) __builtin_altivec_vrsqrtefp ((__vector float) a1);
}
/* vec_sel */
-inline vector float
-vec_sel (vector float a1, vector float a2, vector signed int a3)
+inline __vector float
+vec_sel (__vector float a1, __vector float a2, __vector __bool int a3)
+{
+ return (__vector float) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector float
+vec_sel (__vector float a1, __vector float a2, __vector unsigned int a3)
+{
+ return (__vector float) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector signed int
+vec_sel (__vector signed int a1, __vector signed int a2, __vector __bool int a3)
{
- return (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector float
-vec_sel (vector float a1, vector float a2, vector unsigned int a3)
+inline __vector signed int
+vec_sel (__vector signed int a1, __vector signed int a2, __vector unsigned int a3)
{
- return (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_sel (vector signed int a1, vector signed int a2, vector signed int a3)
+inline __vector unsigned int
+vec_sel (__vector unsigned int a1, __vector unsigned int a2, __vector __bool int a3)
{
- return (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed int
-vec_sel (vector signed int a1, vector signed int a2, vector unsigned int a3)
+inline __vector unsigned int
+vec_sel (__vector unsigned int a1, __vector unsigned int a2, __vector unsigned int a3)
{
- return (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned int
-vec_sel (vector unsigned int a1, vector unsigned int a2, vector signed int a3)
+inline __vector __bool int
+vec_sel (__vector __bool int a1, __vector __bool int a2, __vector __bool int a3)
{
- return (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned int
-vec_sel (vector unsigned int a1, vector unsigned int a2, vector unsigned int a3)
+inline __vector __bool int
+vec_sel (__vector __bool int a1, __vector __bool int a2, __vector unsigned int a3)
{
- return (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool int) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed short
-vec_sel (vector signed short a1, vector signed short a2, vector signed short a3)
+inline __vector signed short
+vec_sel (__vector signed short a1, __vector signed short a2, __vector __bool short a3)
{
- return (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed short
-vec_sel (vector signed short a1, vector signed short a2, vector unsigned short a3)
+inline __vector signed short
+vec_sel (__vector signed short a1, __vector signed short a2, __vector unsigned short a3)
{
- return (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned short
-vec_sel (vector unsigned short a1, vector unsigned short a2, vector signed short a3)
+inline __vector unsigned short
+vec_sel (__vector unsigned short a1, __vector unsigned short a2, __vector __bool short a3)
{
- return (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned short
-vec_sel (vector unsigned short a1, vector unsigned short a2, vector unsigned short a3)
+inline __vector unsigned short
+vec_sel (__vector unsigned short a1, __vector unsigned short a2, __vector unsigned short a3)
{
- return (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector unsigned short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed char
-vec_sel (vector signed char a1, vector signed char a2, vector signed char a3)
+inline __vector __bool short
+vec_sel (__vector __bool short a1, __vector __bool short a2, __vector __bool short a3)
{
- return (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector signed char
-vec_sel (vector signed char a1, vector signed char a2, vector unsigned char a3)
+inline __vector __bool short
+vec_sel (__vector __bool short a1, __vector __bool short a2, __vector unsigned short a3)
{
- return (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector __bool short) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned char
-vec_sel (vector unsigned char a1, vector unsigned char a2, vector signed char a3)
+inline __vector signed char
+vec_sel (__vector signed char a1, __vector signed char a2, __vector __bool char a3)
{
- return (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
-inline vector unsigned char
-vec_sel (vector unsigned char a1, vector unsigned char a2, vector unsigned char a3)
+inline __vector signed char
+vec_sel (__vector signed char a1, __vector signed char a2, __vector unsigned char a3)
{
- return (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3);
+ return (__vector signed char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector unsigned char
+vec_sel (__vector unsigned char a1, __vector unsigned char a2, __vector __bool char a3)
+{
+ return (__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector unsigned char
+vec_sel (__vector unsigned char a1, __vector unsigned char a2, __vector unsigned char a3)
+{
+ return (__vector unsigned char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector __bool char
+vec_sel (__vector __bool char a1, __vector __bool char a2, __vector __bool char a3)
+{
+ return (__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
+}
+
+inline __vector __bool char
+vec_sel (__vector __bool char a1, __vector __bool char a2, __vector unsigned char a3)
+{
+ return (__vector __bool char) __builtin_altivec_vsel_4si ((__vector signed int) a1, (__vector signed int) a2, (__vector signed int) a3);
}
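
/* Usage sketch, illustrative only: vec_sel is a bitwise blend; result bits
   come from a2 where the mask a3 has ones and from a1 elsewhere, which is
   why the new overloads accept the __bool masks produced by the vec_cmp*
   functions.  A hypothetical per-element maximum:

     __vector float
     vmaxf (__vector float a, __vector float b)
     {
       return vec_sel (a, b, vec_cmpgt (b, a));   // b where b > a, else a
     }
*/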
/* vec_sl */
-inline vector signed char
-vec_sl (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sl (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sl (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sl (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sl (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sl (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sl (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sl (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sl (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sl (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sl (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sl (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
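
/* Usage sketch, illustrative only: vec_sl shifts each element left by the
   matching element of a2 (counts modulo the element width); contrast vec_sll
   below, which shifts the whole quadword.  Hypothetically:

     __vector unsigned short
     times16 (__vector unsigned short v)
     {
       return vec_sl (v, vec_splat_u16 (4));
     }
*/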
/* vec_vslw */
-inline vector signed int
-vec_vslw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vslw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vslw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vslw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vslh */
-inline vector signed short
-vec_vslh (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vslh (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vslh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vslh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vslh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vslb */
-inline vector signed char
-vec_vslb (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vslb (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vslb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vslb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vslb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_sld */
-inline vector float
-vec_sld (vector float a1, vector float a2, const char a3)
+inline __vector float
+vec_sld (__vector float a1, __vector float a2, const int a3)
+{
+ return (__vector float) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector signed int
+vec_sld (__vector signed int a1, __vector signed int a2, const int a3)
+{
+ return (__vector signed int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector unsigned int
+vec_sld (__vector unsigned int a1, __vector unsigned int a2, const int a3)
+{
+ return (__vector unsigned int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector __bool int
+vec_sld (__vector __bool int a1, __vector __bool int a2, const int a3)
+{
+ return (__vector __bool int) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
+}
+
+inline __vector signed short
+vec_sld (__vector signed short a1, __vector signed short a2, const int a3)
{
- return (vector float) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector signed short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector signed int
-vec_sld (vector signed int a1, vector signed int a2, const char a3)
+inline __vector unsigned short
+vec_sld (__vector unsigned short a1, __vector unsigned short a2, const int a3)
{
- return (vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector unsigned short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector unsigned int
-vec_sld (vector unsigned int a1, vector unsigned int a2, const char a3)
+inline __vector __bool short
+vec_sld (__vector __bool short a1, __vector __bool short a2, const int a3)
{
- return (vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector __bool short) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector signed short
-vec_sld (vector signed short a1, vector signed short a2, const char a3)
+inline __vector __pixel
+vec_sld (__vector __pixel a1, __vector __pixel a2, const int a3)
{
- return (vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector __pixel) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector unsigned short
-vec_sld (vector unsigned short a1, vector unsigned short a2, const char a3)
+inline __vector signed char
+vec_sld (__vector signed char a1, __vector signed char a2, const int a3)
{
- return (vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector signed char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector signed char
-vec_sld (vector signed char a1, vector signed char a2, const char a3)
+inline __vector unsigned char
+vec_sld (__vector unsigned char a1, __vector unsigned char a2, const int a3)
{
- return (vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector unsigned char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
-inline vector unsigned char
-vec_sld (vector unsigned char a1, vector unsigned char a2, const char a3)
+inline __vector __bool char
+vec_sld (__vector __bool char a1, __vector __bool char a2, const int a3)
{
- return (vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, a3);
+ return (__vector __bool char) __builtin_altivec_vsldoi_4si ((__vector signed int) a1, (__vector signed int) a2, a3);
}
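
/* Usage sketch, illustrative only: vec_sld (vsldoi) concatenates a1:a2 and
   extracts 16 bytes starting at the constant byte offset a3, which must be a
   compile-time value in 0..15, hence the const int operand.  Hypothetically:

     __vector unsigned char
     window4 (__vector unsigned char hi, __vector unsigned char lo)
     {
       return vec_sld (hi, lo, 4);   // bytes 4..19 of hi:lo
     }
*/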
/* vec_sll */
-inline vector signed int
-vec_sll (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sll (__vector signed int a1, __vector unsigned int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_sll (__vector signed int a1, __vector unsigned short a2)
+{
+ return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_sll (__vector signed int a1, __vector unsigned char a2)
+{
+ return (__vector signed int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sll (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sll (__vector unsigned int a1, __vector unsigned short a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sll (__vector unsigned int a1, __vector unsigned char a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_sll (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool int
+vec_sll (__vector __bool int a1, __vector unsigned short a2)
{
- return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sll (vector signed int a1, vector unsigned short a2)
+inline __vector __bool int
+vec_sll (__vector __bool int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sll (vector signed int a1, vector unsigned char a2)
+inline __vector signed short
+vec_sll (__vector signed short a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sll (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed short
+vec_sll (__vector signed short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sll (vector unsigned int a1, vector unsigned short a2)
+inline __vector signed short
+vec_sll (__vector signed short a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sll (vector unsigned int a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_sll (__vector unsigned short a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sll (vector signed short a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_sll (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sll (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sll (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sll (vector signed short a1, vector unsigned char a2)
+inline __vector __bool short
+vec_sll (__vector __bool short a1, __vector unsigned int a2)
{
- return (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sll (vector unsigned short a1, vector unsigned int a2)
+inline __vector __bool short
+vec_sll (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sll (vector unsigned short a1, vector unsigned short a2)
+inline __vector __bool short
+vec_sll (__vector __bool short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sll (vector unsigned short a1, vector unsigned char a2)
+inline __vector __pixel
+vec_sll (__vector __pixel a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sll (vector signed char a1, vector unsigned int a2)
+inline __vector __pixel
+vec_sll (__vector __pixel a1, __vector unsigned short a2)
{
- return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sll (vector signed char a1, vector unsigned short a2)
+inline __vector __pixel
+vec_sll (__vector __pixel a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sll (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sll (__vector signed char a1, __vector unsigned int a2)
{
- return (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sll (vector unsigned char a1, vector unsigned int a2)
+inline __vector signed char
+vec_sll (__vector signed char a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sll (vector unsigned char a1, vector unsigned short a2)
+inline __vector signed char
+vec_sll (__vector signed char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sll (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sll (__vector unsigned char a1, __vector unsigned int a2)
{
- return (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_sll (__vector unsigned char a1, __vector unsigned short a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_sll (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_sll (__vector __bool char a1, __vector unsigned int a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_sll (__vector __bool char a1, __vector unsigned short a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_sll (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsl ((__vector signed int) a1, (__vector signed int) a2);
}
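
/* Usage sketch, illustrative only: vec_sll shifts the entire 128-bit value
   left by a bit count of 0..7 taken from a2; per the AltiVec PIM the low
   three bits of every byte of a2 must hold the same count, so the control
   vector is conventionally built with a splat, e.g. hypothetically
   vec_sll (v, vec_splat_u8 (3)) to shift the quadword left three bits.  */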
/* vec_slo */
-inline vector float
-vec_slo (vector float a1, vector signed char a2)
+inline __vector float
+vec_slo (__vector float a1, __vector signed char a2)
+{
+ return (__vector float) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_slo (__vector float a1, __vector unsigned char a2)
{
- return (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_slo (vector float a1, vector unsigned char a2)
+inline __vector signed int
+vec_slo (__vector signed int a1, __vector signed char a2)
{
- return (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_slo (vector signed int a1, vector signed char a2)
+inline __vector signed int
+vec_slo (__vector signed int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_slo (vector signed int a1, vector unsigned char a2)
+inline __vector unsigned int
+vec_slo (__vector unsigned int a1, __vector signed char a2)
{
- return (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_slo (vector unsigned int a1, vector signed char a2)
+inline __vector unsigned int
+vec_slo (__vector unsigned int a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_slo (vector unsigned int a1, vector unsigned char a2)
+inline __vector signed short
+vec_slo (__vector signed short a1, __vector signed char a2)
{
- return (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_slo (vector signed short a1, vector signed char a2)
+inline __vector signed short
+vec_slo (__vector signed short a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_slo (vector signed short a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_slo (__vector unsigned short a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_slo (vector unsigned short a1, vector signed char a2)
+inline __vector unsigned short
+vec_slo (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_slo (vector unsigned short a1, vector unsigned char a2)
+inline __vector __pixel
+vec_slo (__vector __pixel a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_slo (vector signed char a1, vector signed char a2)
+inline __vector __pixel
+vec_slo (__vector __pixel a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_slo (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_slo (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_slo (vector unsigned char a1, vector signed char a2)
+inline __vector signed char
+vec_slo (__vector signed char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_slo (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_slo (__vector unsigned char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_slo (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vslo ((__vector signed int) a1, (__vector signed int) a2);
}
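
/* Usage sketch, illustrative only: vec_slo shifts the whole quadword left by
   a byte count taken from bits 121:124 of a2.  The common idiom pairs it
   with vec_sll for arbitrary bit counts: with the count n (0..127)
   replicated in every byte of c, vec_sll (vec_slo (v, c), c) hypothetically
   shifts v left n bits, since vslo reads n>>3 octets and vsl reads n&7 bits
   from the same control vector.  */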
/* vec_splat */
-inline vector signed char
-vec_splat (vector signed char a1, const char a2)
+inline __vector signed char
+vec_splat (__vector signed char a1, const int a2)
{
- return (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector signed char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector unsigned char
-vec_splat (vector unsigned char a1, const char a2)
+inline __vector unsigned char
+vec_splat (__vector unsigned char a1, const int a2)
{
- return (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector signed short
-vec_splat (vector signed short a1, const char a2)
+inline __vector __bool char
+vec_splat (__vector __bool char a1, const int a2)
{
- return (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector unsigned short
-vec_splat (vector unsigned short a1, const char a2)
+inline __vector signed short
+vec_splat (__vector signed short a1, const int a2)
{
- return (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector signed short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector float
-vec_splat (vector float a1, const char a2)
+inline __vector unsigned short
+vec_splat (__vector unsigned short a1, const int a2)
{
- return (vector float) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector signed int
-vec_splat (vector signed int a1, const char a2)
+inline __vector __bool short
+vec_splat (__vector __bool short a1, const int a2)
{
- return (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector unsigned int
-vec_splat (vector unsigned int a1, const char a2)
+inline __vector __pixel
+vec_splat (__vector __pixel a1, const int a2)
{
- return (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
+}
+
+inline __vector float
+vec_splat (__vector float a1, const int a2)
+{
+ return (__vector float) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector signed int
+vec_splat (__vector signed int a1, const int a2)
+{
+ return (__vector signed int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector unsigned int
+vec_splat (__vector unsigned int a1, const int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector __bool int
+vec_splat (__vector __bool int a1, const int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
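
/* Usage sketch (illustrative only; assumes GCC with -maltivec).  The
   splat index is now a `const int' literal: 0..15 for byte elements,
   0..7 for halfwords, 0..3 for words.  */
#include <altivec.h>

static __vector unsigned char
broadcast_element_3 (__vector unsigned char v)
{
  return vec_splat (v, 3);  /* copy byte 3 into all 16 lanes */
}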
/* vec_vspltw */
-inline vector float
-vec_vspltw (vector float a1, const char a2)
+inline __vector float
+vec_vspltw (__vector float a1, const int a2)
{
- return (vector float) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector float) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
-inline vector signed int
-vec_vspltw (vector signed int a1, const char a2)
+inline __vector signed int
+vec_vspltw (__vector signed int a1, const int a2)
{
- return (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector signed int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
-inline vector unsigned int
-vec_vspltw (vector unsigned int a1, const char a2)
+inline __vector unsigned int
+vec_vspltw (__vector unsigned int a1, const int a2)
{
- return (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, a2);
+ return (__vector unsigned int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
+}
+
+inline __vector __bool int
+vec_vspltw (__vector __bool int a1, const int a2)
+{
+ return (__vector __bool int) __builtin_altivec_vspltw ((__vector signed int) a1, a2);
}
/* vec_vsplth */
-inline vector signed short
-vec_vsplth (vector signed short a1, const char a2)
+inline __vector __bool short
+vec_vsplth (__vector __bool short a1, const int a2)
+{
+ return (__vector __bool short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
+}
+
+inline __vector signed short
+vec_vsplth (__vector signed short a1, const int a2)
{
- return (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector signed short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
-inline vector unsigned short
-vec_vsplth (vector unsigned short a1, const char a2)
+inline __vector unsigned short
+vec_vsplth (__vector unsigned short a1, const int a2)
{
- return (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, a2);
+ return (__vector unsigned short) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
+}
+
+inline __vector __pixel
+vec_vsplth (__vector __pixel a1, const int a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsplth ((__vector signed short) a1, a2);
}
/* vec_vspltb */
-inline vector signed char
-vec_vspltb (vector signed char a1, const char a2)
+inline __vector signed char
+vec_vspltb (__vector signed char a1, const int a2)
{
- return (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector signed char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
-inline vector unsigned char
-vec_vspltb (vector unsigned char a1, const char a2)
+inline __vector unsigned char
+vec_vspltb (__vector unsigned char a1, const int a2)
{
- return (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, a2);
+ return (__vector unsigned char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
+}
+
+inline __vector __bool char
+vec_vspltb (__vector __bool char a1, const int a2)
+{
+ return (__vector __bool char) __builtin_altivec_vspltb ((__vector signed char) a1, a2);
}
/* vec_splat_s8 */
-inline vector signed char
-vec_splat_s8 (const char a1)
+inline __vector signed char
+vec_splat_s8 (const int a1)
{
- return (vector signed char) __builtin_altivec_vspltisb (a1);
+ return (__vector signed char) __builtin_altivec_vspltisb (a1);
}
/* vec_splat_s16 */
-inline vector signed short
-vec_splat_s16 (const char a1)
+inline __vector signed short
+vec_splat_s16 (const int a1)
{
- return (vector signed short) __builtin_altivec_vspltish (a1);
+ return (__vector signed short) __builtin_altivec_vspltish (a1);
}
/* vec_splat_s32 */
-inline vector signed int
-vec_splat_s32 (const char a1)
+inline __vector signed int
+vec_splat_s32 (const int a1)
{
- return (vector signed int) __builtin_altivec_vspltisw (a1);
+ return (__vector signed int) __builtin_altivec_vspltisw (a1);
}
/* vec_splat_u8 */
-inline vector unsigned char
-vec_splat_u8 (const char a1)
+inline __vector unsigned char
+vec_splat_u8 (const int a1)
{
- return (vector unsigned char) __builtin_altivec_vspltisb (a1);
+ return (__vector unsigned char) __builtin_altivec_vspltisb (a1);
}
/* vec_splat_u16 */
-inline vector unsigned short
-vec_splat_u16 (const char a1)
+inline __vector unsigned short
+vec_splat_u16 (const int a1)
{
- return (vector unsigned short) __builtin_altivec_vspltish (a1);
+ return (__vector unsigned short) __builtin_altivec_vspltish (a1);
}
/* vec_splat_u32 */
-inline vector unsigned int
-vec_splat_u32 (const char a1)
+inline __vector unsigned int
+vec_splat_u32 (const int a1)
{
- return (vector unsigned int) __builtin_altivec_vspltisw (a1);
+ return (__vector unsigned int) __builtin_altivec_vspltisw (a1);
}
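
/* Usage sketch (illustrative only; assumes GCC with -maltivec).  The
   vec_splat_[su]{8,16,32} forms wrap vspltis[bhw], whose operand is a
   5-bit signed immediate, so the argument must still be a literal in
   -16..15 even though the prototype now says `const int'.  */
#include <altivec.h>

static __vector signed short
five_in_every_halfword (void)
{
  return vec_splat_s16 (5);  /* {5,5,5,5,5,5,5,5} */
}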
/* vec_sr */
-inline vector signed char
-vec_sr (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sr (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sr (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sr (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sr (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sr (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sr (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sr (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sr (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sr (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sr (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sr (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
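
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   vec_sr shifts each element independently by the count held in the
   matching element of the second operand (modulo the element width).  */
#include <altivec.h>

static __vector unsigned int
halve_each_word (__vector unsigned int v)
{
  return vec_sr (v, vec_splat_u32 (1));  /* logical >> 1 per word */
}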
/* vec_vsrw */
-inline vector signed int
-vec_vsrw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vsrw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsrw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsrw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsrw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsrh */
-inline vector signed short
-vec_vsrh (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vsrh (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsrh (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsrh (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrh ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsrb */
-inline vector signed char
-vec_vsrb (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vsrb (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsrb (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsrb (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrb ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_sra */
-inline vector signed char
-vec_sra (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sra (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sra (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sra (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sra (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sra (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sra (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_sra (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sra (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sra (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sra (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_sra (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
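
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   vec_sra is the arithmetic variant: the sign bit is replicated, so
   negative elements stay negative.  */
#include <altivec.h>

static __vector signed int
quarter_each_word (__vector signed int v)
{
  return vec_sra (v, vec_splat_u32 (2));  /* e.g. -8 -> -2 */
}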
/* vec_vsraw */
-inline vector signed int
-vec_vsraw (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vsraw (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsraw (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsraw (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsraw ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsrah */
-inline vector signed short
-vec_vsrah (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_vsrah (__vector signed short a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsrah (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsrah (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsrah ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsrab */
-inline vector signed char
-vec_vsrab (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_vsrab (__vector signed char a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsrab (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsrab (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsrab ((__vector signed char) a1, (__vector signed char) a2);
}
/* vec_srl */
-inline vector signed int
-vec_srl (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_srl (__vector signed int a1, __vector unsigned int a2)
{
- return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_srl (vector signed int a1, vector unsigned short a2)
+inline __vector signed int
+vec_srl (__vector signed int a1, __vector unsigned short a2)
{
- return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_srl (vector signed int a1, vector unsigned char a2)
+inline __vector signed int
+vec_srl (__vector signed int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_srl (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_srl (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_srl (vector unsigned int a1, vector unsigned short a2)
+inline __vector unsigned int
+vec_srl (__vector unsigned int a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_srl (vector unsigned int a1, vector unsigned char a2)
+inline __vector unsigned int
+vec_srl (__vector unsigned int a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_srl (vector signed short a1, vector unsigned int a2)
+inline __vector __bool int
+vec_srl (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_srl (vector signed short a1, vector unsigned short a2)
+inline __vector __bool int
+vec_srl (__vector __bool int a1, __vector unsigned short a2)
{
- return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_srl (vector signed short a1, vector unsigned char a2)
+inline __vector __bool int
+vec_srl (__vector __bool int a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool int) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_srl (vector unsigned short a1, vector unsigned int a2)
+inline __vector signed short
+vec_srl (__vector signed short a1, __vector unsigned int a2)
{
- return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_srl (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed short
+vec_srl (__vector signed short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_srl (vector unsigned short a1, vector unsigned char a2)
+inline __vector signed short
+vec_srl (__vector signed short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_srl (vector signed char a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_srl (__vector unsigned short a1, __vector unsigned int a2)
{
- return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_srl (vector signed char a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_srl (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_srl (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_srl (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_srl (vector unsigned char a1, vector unsigned int a2)
+inline __vector __bool short
+vec_srl (__vector __bool short a1, __vector unsigned int a2)
{
- return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_srl (vector unsigned char a1, vector unsigned short a2)
+inline __vector __bool short
+vec_srl (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_srl (vector unsigned char a1, vector unsigned char a2)
+inline __vector __bool short
+vec_srl (__vector __bool short a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __bool short) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __pixel
+vec_srl (__vector __pixel a1, __vector unsigned int a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __pixel
+vec_srl (__vector __pixel a1, __vector unsigned short a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __pixel
+vec_srl (__vector __pixel a1, __vector unsigned char a2)
+{
+ return (__vector __pixel) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_srl (__vector signed char a1, __vector unsigned int a2)
+{
+ return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_srl (__vector signed char a1, __vector unsigned short a2)
+{
+ return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed char
+vec_srl (__vector signed char a1, __vector unsigned char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_srl (__vector unsigned char a1, __vector unsigned int a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_srl (__vector unsigned char a1, __vector unsigned short a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_srl (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_srl (__vector __bool char a1, __vector unsigned int a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_srl (__vector __bool char a1, __vector unsigned short a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector __bool char
+vec_srl (__vector __bool char a1, __vector unsigned char a2)
+{
+ return (__vector __bool char) __builtin_altivec_vsr ((__vector signed int) a1, (__vector signed int) a2);
}
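
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   Unlike vec_sr, vec_srl shifts the entire 128-bit register; the 0..7
   bit count comes from the low three bits of the shift operand, which
   should hold the same value in every byte.  */
#include <altivec.h>

static __vector unsigned char
shift_whole_register_right (__vector unsigned char v)
{
  return vec_srl (v, vec_splat_u8 (3));  /* whole vector >> 3 bits */
}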
/* vec_sro */
-inline vector float
-vec_sro (vector float a1, vector signed char a2)
+inline __vector float
+vec_sro (__vector float a1, __vector signed char a2)
+{
+ return (__vector float) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_sro (__vector float a1, __vector unsigned char a2)
{
- return (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector float) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_sro (vector float a1, vector unsigned char a2)
+inline __vector signed int
+vec_sro (__vector signed int a1, __vector signed char a2)
{
- return (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sro (vector signed int a1, vector signed char a2)
+inline __vector signed int
+vec_sro (__vector signed int a1, __vector unsigned char a2)
{
- return (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed int
-vec_sro (vector signed int a1, vector unsigned char a2)
+inline __vector unsigned int
+vec_sro (__vector unsigned int a1, __vector signed char a2)
{
- return (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sro (vector unsigned int a1, vector signed char a2)
+inline __vector unsigned int
+vec_sro (__vector unsigned int a1, __vector unsigned char a2)
{
- return (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_sro (vector unsigned int a1, vector unsigned char a2)
+inline __vector signed short
+vec_sro (__vector signed short a1, __vector signed char a2)
{
- return (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sro (vector signed short a1, vector signed char a2)
+inline __vector signed short
+vec_sro (__vector signed short a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed short
-vec_sro (vector signed short a1, vector unsigned char a2)
+inline __vector unsigned short
+vec_sro (__vector unsigned short a1, __vector signed char a2)
{
- return (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sro (vector unsigned short a1, vector signed char a2)
+inline __vector unsigned short
+vec_sro (__vector unsigned short a1, __vector unsigned char a2)
{
- return (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned short
-vec_sro (vector unsigned short a1, vector unsigned char a2)
+inline __vector __pixel
+vec_sro (__vector __pixel a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sro (vector signed char a1, vector signed char a2)
+inline __vector __pixel
+vec_sro (__vector __pixel a1, __vector unsigned char a2)
{
- return (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector __pixel) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector signed char
-vec_sro (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sro (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sro (vector unsigned char a1, vector signed char a2)
+inline __vector signed char
+vec_sro (__vector signed char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned char
-vec_sro (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sro (__vector unsigned char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned char
+vec_sro (__vector unsigned char a1, __vector unsigned char a2)
+{
+ return (__vector unsigned char) __builtin_altivec_vsro ((__vector signed int) a1, (__vector signed int) a2);
}
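
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   vec_sro shifts the full register right by whole octets (the byte
   count sits in bits 121:124, i.e. count << 3); combined with vec_srl
   it yields an arbitrary 0..127-bit shift.  */
#include <altivec.h>

static __vector unsigned char
shift_right_11_bits (__vector unsigned char v)
{
  v = vec_sro (v, vec_splat_u8 (1 << 3));  /* 1 byte = 8 bits */
  return vec_srl (v, vec_splat_u8 (3));    /* plus 3 bits -> 11 */
}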
/* vec_st */
inline void
-vec_st (vector float a1, int a2, void *a3)
+vec_st (__vector float a1, int a2, __vector float *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector float a1, int a2, float *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed int a1, int a2, __vector signed int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed int a1, int a2, int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned int a1, int a2, __vector unsigned int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool int a1, int a2, __vector __bool int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool int a1, int a2, int *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector signed int a1, int a2, void *a3)
+vec_st (__vector signed short a1, int a2, __vector signed short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector unsigned int a1, int a2, void *a3)
+vec_st (__vector signed short a1, int a2, short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector signed short a1, int a2, void *a3)
+vec_st (__vector unsigned short a1, int a2, __vector unsigned short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector unsigned short a1, int a2, void *a3)
+vec_st (__vector unsigned short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector signed char a1, int a2, void *a3)
+vec_st (__vector __bool short a1, int a2, __vector __bool short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_st (vector unsigned char a1, int a2, void *a3)
+vec_st (__vector __bool short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __pixel a1, int a2, __vector __pixel *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __pixel a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __pixel a1, int a2, short *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed char a1, int a2, __vector signed char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned char a1, int a2, __vector unsigned char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool char a1, int a2, __vector __bool char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_st (__vector __bool char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvx ((__vector signed int) a1, a2, (void *) a3);
}
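
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   Replacing the old `void *' third argument with typed pointers lets
   the C++ overloads type-check the destination; stvx still stores
   16 bytes at the effective address (a2 + a3) rounded down to a
   16-byte boundary.  */
#include <altivec.h>

static void
store_words (__vector signed int v, int *dst)  /* dst 16-byte aligned */
{
  vec_st (v, 0, dst);
}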
/* vec_ste */
inline void
-vec_ste (vector signed char a1, int a2, void *a3)
+vec_ste (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector signed short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector unsigned short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool short a1, int a2, short *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector unsigned char a1, int a2, void *a3)
+vec_ste (__vector __bool short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector signed short a1, int a2, void *a3)
+vec_ste (__vector __pixel a1, int a2, short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector unsigned short a1, int a2, void *a3)
+vec_ste (__vector __pixel a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector float a1, int a2, void *a3)
+vec_ste (__vector float a1, int a2, float *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector signed int a1, int a2, void *a3)
+vec_ste (__vector signed int a1, int a2, int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_ste (vector unsigned int a1, int a2, void *a3)
+vec_ste (__vector unsigned int a1, int a2, unsigned int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool int a1, int a2, int *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_ste (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
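
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   vec_ste writes a single element: only the halfword (here) selected
   by the effective address is stored.  */
#include <altivec.h>

static void
store_one_halfword (__vector signed short v, short *p)
{
  vec_ste (v, 0, p);  /* element chosen by the low bits of the address */
}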
/* vec_stvewx */
inline void
-vec_stvewx (vector float a1, int a2, void *a3)
+vec_stvewx (__vector float a1, int a2, float *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvewx (__vector signed int a1, int a2, int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stvewx (vector signed int a1, int a2, void *a3)
+vec_stvewx (__vector unsigned int a1, int a2, unsigned int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stvewx (vector unsigned int a1, int a2, void *a3)
+vec_stvewx (__vector __bool int a1, int a2, int *a3)
{
- __builtin_altivec_stvewx ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvewx (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvewx ((__vector signed int) a1, a2, (void *) a3);
}
/* vec_stvehx */
inline void
-vec_stvehx (vector signed short a1, int a2, void *a3)
+vec_stvehx (__vector signed short a1, int a2, short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
inline void
-vec_stvehx (vector unsigned short a1, int a2, void *a3)
+vec_stvehx (__vector unsigned short a1, int a2, unsigned short *a3)
{
- __builtin_altivec_stvehx ((vector signed short) a1, a2, (void *) a3);
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __bool short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __bool short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __pixel a1, int a2, short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvehx (__vector __pixel a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvehx ((__vector signed short) a1, a2, (void *) a3);
}
/* vec_stvebx */
inline void
-vec_stvebx (vector signed char a1, int a2, void *a3)
+vec_stvebx (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvebx (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stvebx (__vector __bool char a1, int a2, signed char *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
}
inline void
-vec_stvebx (vector unsigned char a1, int a2, void *a3)
+vec_stvebx (__vector __bool char a1, int a2, unsigned char *a3)
{
- __builtin_altivec_stvebx ((vector signed char) a1, a2, (void *) a3);
+ __builtin_altivec_stvebx ((__vector signed char) a1, a2, (void *) a3);
}
/* vec_stl */
inline void
-vec_stl (vector float a1, int a2, void *a3)
+vec_stl (__vector float a1, int a2, __vector float *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector signed int a1, int a2, void *a3)
+vec_stl (__vector float a1, int a2, float *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector unsigned int a1, int a2, void *a3)
+vec_stl (__vector signed int a1, int a2, __vector signed int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector signed short a1, int a2, void *a3)
+vec_stl (__vector signed int a1, int a2, int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector unsigned short a1, int a2, void *a3)
+vec_stl (__vector unsigned int a1, int a2, __vector unsigned int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector signed char a1, int a2, void *a3)
+vec_stl (__vector unsigned int a1, int a2, unsigned int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
inline void
-vec_stl (vector unsigned char a1, int a2, void *a3)
+vec_stl (__vector __bool int a1, int a2, __vector __bool int *a3)
{
- __builtin_altivec_stvxl ((vector signed int) a1, a2, (void *) a3);
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool int a1, int a2, unsigned int *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool int a1, int a2, int *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed short a1, int a2, __vector signed short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned short a1, int a2, __vector unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool short a1, int a2, __vector __bool short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool short a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool short a1, int a2, short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __pixel a1, int a2, __vector __pixel *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __pixel a1, int a2, unsigned short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __pixel a1, int a2, short *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed char a1, int a2, __vector signed char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector signed char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned char a1, int a2, __vector unsigned char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector unsigned char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool char a1, int a2, __vector __bool char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool char a1, int a2, unsigned char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
+}
+
+inline void
+vec_stl (__vector __bool char a1, int a2, signed char *a3)
+{
+ __builtin_altivec_stvxl ((__vector signed int) a1, a2, (void *) a3);
}
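
/* Usage sketch (illustrative only; assumes GCC with -maltivec).
   vec_stl is vec_st through stvxl, which marks the touched cache line
   least recently used: a hint that the data will not be read back
   soon.  */
#include <altivec.h>

static void
store_and_forget (__vector float v, float *dst)  /* dst 16-byte aligned */
{
  vec_stl (v, 0, dst);
}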
/* vec_sub */
-inline vector signed char
-vec_sub (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_sub (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_sub (__vector signed char a1, __vector __bool char a2)
{
- return (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sub (vector signed char a1, vector unsigned char a2)
+inline __vector signed char
+vec_sub (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sub (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_sub (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_sub (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_sub (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed short
-vec_sub (vector signed short a1, vector signed short a2)
+inline __vector unsigned char
+vec_sub (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_sub (vector signed short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sub (__vector __bool short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sub (vector unsigned short a1, vector signed short a2)
+inline __vector signed short
+vec_sub (__vector signed short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_sub (vector unsigned short a1, vector unsigned short a2)
+inline __vector signed short
+vec_sub (__vector signed short a1, __vector signed short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed int
-vec_sub (vector signed int a1, vector signed int a2)
+inline __vector unsigned short
+vec_sub (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_sub (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_sub (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_sub (vector unsigned int a1, vector signed int a2)
+inline __vector unsigned short
+vec_sub (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_sub (vector unsigned int a1, vector unsigned int a2)
+inline __vector signed int
+vec_sub (__vector __bool int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector float
-vec_sub (vector float a1, vector float a2)
+inline __vector signed int
+vec_sub (__vector signed int a1, __vector __bool int a2)
{
- return (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector signed int
+vec_sub (__vector signed int a1, __vector signed int a2)
+{
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sub (__vector __bool int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sub (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_sub (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector float
+vec_sub (__vector float a1, __vector float a2)
+{
+ return (__vector float) __builtin_altivec_vsubfp ((__vector float) a1, (__vector float) a2);
}
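
/* Usage sketch (illustrative only; assumes GCC with -maltivec).  The
   new __bool overloads make mask arithmetic convenient: a true lane is
   all ones (-1), so subtracting a mask increments exactly the selected
   lanes.  */
#include <altivec.h>

static __vector signed int
increment_where_true (__vector signed int v, __vector __bool int m)
{
  return vec_sub (v, m);  /* v - (-1) == v + 1 in true lanes */
}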
/* vec_vsubfp */
-inline vector float
-vec_vsubfp (vector float a1, vector float a2)
+inline __vector float
+vec_vsubfp (__vector float a1, __vector float a2)
{
- return (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2);
+ return (__vector float) __builtin_altivec_vsubfp ((__vector float) a1, (__vector float) a2);
}
/* vec_vsubuwm */
-inline vector signed int
-vec_vsubuwm (vector signed int a1, vector signed int a2)
+inline __vector signed int
+vec_vsubuwm (__vector __bool int a1, __vector signed int a2)
{
- return (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuwm (vector signed int a1, vector unsigned int a2)
+inline __vector signed int
+vec_vsubuwm (__vector signed int a1, __vector __bool int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuwm (vector unsigned int a1, vector signed int a2)
+inline __vector signed int
+vec_vsubuwm (__vector signed int a1, __vector signed int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector signed int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
-inline vector unsigned int
-vec_vsubuwm (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_vsubuwm (__vector __bool int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vsubuwm (__vector unsigned int a1, __vector __bool int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
+}
+
+inline __vector unsigned int
+vec_vsubuwm (__vector unsigned int a1, __vector unsigned int a2)
+{
+ return (__vector unsigned int) __builtin_altivec_vsubuwm ((__vector signed int) a1, (__vector signed int) a2);
}
/* vec_vsubuhm */
-inline vector signed short
-vec_vsubuhm (vector signed short a1, vector signed short a2)
+inline __vector signed short
+vec_vsubuhm (__vector __bool short a1, __vector signed short a2)
+{
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vsubuhm (__vector signed short a1, __vector __bool short a2)
+{
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
+}
+
+inline __vector signed short
+vec_vsubuhm (__vector signed short a1, __vector signed short a2)
{
- return (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhm (vector signed short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsubuhm (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhm (vector unsigned short a1, vector signed short a2)
+inline __vector unsigned short
+vec_vsubuhm (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned short
-vec_vsubuhm (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_vsubuhm (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhm ((__vector signed short) a1, (__vector signed short) a2);
}
/* vec_vsububm */
-inline vector signed char
-vec_vsububm (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_vsububm (__vector __bool char a1, __vector signed char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vsububm (__vector signed char a1, __vector __bool char a2)
+{
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
+}
+
+inline __vector signed char
+vec_vsububm (__vector signed char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububm (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsububm (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububm (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_vsububm (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_vsububm (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_vsububm (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububm ((__vector signed char) a1, (__vector signed char) a2);
}
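
Unlike the generic vec_sub, the vec_vsubu{b,h,w}m entry points above fix the element width in the name; all of them subtract modulo 2^n per lane (wraparound), in contrast to the saturating instructions behind vec_subs. A hedged sketch under the same assumptions as before (hypothetical demo_* name, g++ with -maltivec):

#include <altivec.h>

__vector unsigned char
demo_sub_bytes (__vector unsigned char a, __vector unsigned char b)
{
  return vec_vsububm (a, b);    /* byte-wide modular subtract */
}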
/* vec_subc */
-inline vector unsigned int
-vec_subc (vector unsigned int a1, vector unsigned int a2)
+inline __vector unsigned int
+vec_subc (__vector unsigned int a1, __vector unsigned int a2)
{
- return (vector unsigned int) __builtin_altivec_vsubcuw ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned int) __builtin_altivec_vsubcuw ((__vector signed int) a1, (__vector signed int) a2);
}
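
vec_subc does not return a difference at all: vsubcuw writes, per 32-bit lane, the complement of the borrow out of a1 - a2, i.e. 1 where a1 >= a2 and 0 where the lane would borrow. That makes it the building block for multi-word subtraction. An illustrative sketch (hypothetical name, g++ with -maltivec assumed):

#include <altivec.h>

/* 1 in each word lane where a >= b (no borrow), else 0.  */
__vector unsigned int
demo_no_borrow (__vector unsigned int a, __vector unsigned int b)
{
  return vec_subc (a, b);
}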
/* vec_subs */
-inline vector unsigned char
-vec_subs (vector signed char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_subs (__vector __bool char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_subs (vector unsigned char a1, vector signed char a2)
+inline __vector unsigned char
+vec_subs (__vector unsigned char a1, __vector __bool char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned char
-vec_subs (vector unsigned char a1, vector unsigned char a2)
+inline __vector unsigned char
+vec_subs (__vector unsigned char a1, __vector unsigned char a2)
{
- return (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector unsigned char) __builtin_altivec_vsububs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector signed char
-vec_subs (vector signed char a1, vector signed char a2)
+inline __vector signed char
+vec_subs (__vector __bool char a1, __vector signed char a2)
{
- return (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_subs (vector signed short a1, vector unsigned short a2)
+inline __vector signed char
+vec_subs (__vector signed char a1, __vector __bool char a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_subs (vector unsigned short a1, vector signed short a2)
+inline __vector signed char
+vec_subs (__vector signed char a1, __vector signed char a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector signed char) __builtin_altivec_vsubsbs ((__vector signed char) a1, (__vector signed char) a2);
}
-inline vector unsigned short
-vec_subs (vector unsigned short a1, vector unsigned short a2)
+inline __vector unsigned short
+vec_subs (__vector __bool short a1, __vector unsigned short a2)
{
- return (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector signed short
-vec_subs (vector signed short a1, vector signed short a2)
+inline __vector unsigned short
+vec_subs (__vector unsigned short a1, __vector __bool short a2)
{
- return (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
-inline vector unsigned int
-vec_subs (vector signed int a1, vector unsigned int a2)
+inline __vector unsigned short
+vec_subs (__vector unsigned short a1, __vector unsigned short a2)
{
- return (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2);
+ return (__vector unsigned short) __builtin_altivec_vsubuhs ((__vector signed short) a1, (__vector signed short) a2);
}
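
The vec_subs overloads saturate rather than wrap: unsigned lanes clamp to 0, signed lanes clamp to the limits of the element type. A final hedged sketch under the same assumptions (hypothetical demo_* name):

#include <altivec.h>

__vector unsigned char
demo_subs_bytes (__vector unsigned char a, __vector unsigned char b)
{
  /* vsububs: lanes where b > a produce 0 instead of wrapping.  */
  return vec_subs (a, b);
}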