-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/asm.h avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/asm.h
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/asm.h 2002-10-16 09:26:12.000000000 +0200
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/asm.h 2003-09-28 17:26:39.000000000 +0200
-@@ -42,14 +42,14 @@
- #define AMASK_CIX (1 << 2)
- #define AMASK_MVI (1 << 8)
-
--inline static uint64_t BYTE_VEC(uint64_t x)
-+static inline uint64_t BYTE_VEC(uint64_t x)
- {
- x |= x << 8;
- x |= x << 16;
- x |= x << 32;
- return x;
- }
--inline static uint64_t WORD_VEC(uint64_t x)
-+static inline uint64_t WORD_VEC(uint64_t x)
- {
- x |= x << 16;
- x |= x << 32;
-@@ -63,27 +63,15 @@
- #define sextw(x) ((int16_t) (x))
-
- #ifdef __GNUC__
--#define ASM_ACCEPT_MVI asm (".arch pca56")
- struct unaligned_long { uint64_t l; } __attribute__((packed));
- #define ldq_u(p) (*(const uint64_t *) (((uint64_t) (p)) & ~7ul))
- #define uldq(a) (((const struct unaligned_long *) (a))->l)
-
--#if GNUC_PREREQ(3,0)
--/* Unfortunately, __builtin_prefetch is slightly buggy on Alpha. The
-- defines here are kludged so we still get the right
-- instruction. This needs to be adapted as soon as gcc is fixed. */
--# define prefetch(p) __builtin_prefetch((p), 0, 1)
--# define prefetch_en(p) __builtin_prefetch((p), 1, 1)
--# define prefetch_m(p) __builtin_prefetch((p), 0, 0)
--# define prefetch_men(p) __builtin_prefetch((p), 1, 0)
--#else
--# define prefetch(p) asm volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
--# define prefetch_en(p) asm volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
--# define prefetch_m(p) asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
--# define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
--#endif
--
- #if GNUC_PREREQ(3,3)
-+#define prefetch(p) __builtin_prefetch((p), 0, 1)
-+#define prefetch_en(p) __builtin_prefetch((p), 0, 0)
-+#define prefetch_m(p) __builtin_prefetch((p), 1, 1)
-+#define prefetch_men(p) __builtin_prefetch((p), 1, 0)
- #define cmpbge __builtin_alpha_cmpbge
- /* Avoid warnings. */
- #define extql(a, b) __builtin_alpha_extql(a, (uint64_t) (b))
-@@ -94,6 +82,24 @@
- #define amask __builtin_alpha_amask
- #define implver __builtin_alpha_implver
- #define rpcc __builtin_alpha_rpcc
-+#else
-+#define prefetch(p) asm volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
-+#define prefetch_en(p) asm volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
-+#define prefetch_m(p) asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
-+#define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
-+#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-+#define extql(a, b) ({ uint64_t __r; asm ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-+#define extwl(a, b) ({ uint64_t __r; asm ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-+#define extqh(a, b) ({ uint64_t __r; asm ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-+#define zap(a, b) ({ uint64_t __r; asm ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-+#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-+#define amask(a) ({ uint64_t __r; asm ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
-+#define implver() ({ uint64_t __r; asm ("implver %0" : "=r" (__r)); __r; })
-+#define rpcc() ({ uint64_t __r; asm volatile ("rpcc %0" : "=r" (__r)); __r; })
-+#endif
-+#define wh64(p) asm volatile("wh64 (%0)" : : "r"(p) : "memory")
-+
-+#if GNUC_PREREQ(3,3) && defined(__alpha_max__)
- #define minub8 __builtin_alpha_minub8
- #define minsb8 __builtin_alpha_minsb8
- #define minuw4 __builtin_alpha_minuw4
-@@ -108,34 +114,24 @@
- #define unpkbl __builtin_alpha_unpkbl
- #define unpkbw __builtin_alpha_unpkbw
- #else
--#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
--#define extql(a, b) ({ uint64_t __r; asm ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
--#define extwl(a, b) ({ uint64_t __r; asm ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
--#define extqh(a, b) ({ uint64_t __r; asm ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
--#define zap(a, b) ({ uint64_t __r; asm ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
--#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
--#define amask(a) ({ uint64_t __r; asm ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
--#define implver() ({ uint64_t __r; asm ("implver %0" : "=r" (__r)); __r; })
--#define rpcc() ({ uint64_t __r; asm volatile ("rpcc %0" : "=r" (__r)); __r; })
--#define minub8(a, b) ({ uint64_t __r; asm ("minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define minsb8(a, b) ({ uint64_t __r; asm ("minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define minuw4(a, b) ({ uint64_t __r; asm ("minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define minsw4(a, b) ({ uint64_t __r; asm ("minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define maxub8(a, b) ({ uint64_t __r; asm ("maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define maxsb8(a, b) ({ uint64_t __r; asm ("maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define maxuw4(a, b) ({ uint64_t __r; asm ("maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define maxsw4(a, b) ({ uint64_t __r; asm ("maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
--#define perr(a, b) ({ uint64_t __r; asm ("perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
--#define pklb(a) ({ uint64_t __r; asm ("pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
--#define pkwb(a) ({ uint64_t __r; asm ("pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
--#define unpkbl(a) ({ uint64_t __r; asm ("unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
--#define unpkbw(a) ({ uint64_t __r; asm ("unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-+#define minub8(a, b) ({ uint64_t __r; asm (".arch ev6; minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define minsb8(a, b) ({ uint64_t __r; asm (".arch ev6; minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define minuw4(a, b) ({ uint64_t __r; asm (".arch ev6; minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define minsw4(a, b) ({ uint64_t __r; asm (".arch ev6; minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define maxub8(a, b) ({ uint64_t __r; asm (".arch ev6; maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define maxsb8(a, b) ({ uint64_t __r; asm (".arch ev6; maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define maxuw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define maxsw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-+#define perr(a, b) ({ uint64_t __r; asm (".arch ev6; perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
-+#define pklb(a) ({ uint64_t __r; asm (".arch ev6; pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-+#define pkwb(a) ({ uint64_t __r; asm (".arch ev6; pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-+#define unpkbl(a) ({ uint64_t __r; asm (".arch ev6; unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-+#define unpkbw(a) ({ uint64_t __r; asm (".arch ev6; unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
- #endif
-
- #elif defined(__DECC) /* Digital/Compaq/hp "ccc" compiler */
-
- #include <c_asm.h>
--#define ASM_ACCEPT_MVI
- #define ldq_u(a) asm ("ldq_u %v0,0(%a0)", a)
- #define uldq(a) (*(const __unaligned uint64_t *) (a))
- #define cmpbge(a, b) asm ("cmpbge %a0,%a1,%v0", a, b)
-@@ -160,6 +156,7 @@
- #define pkwb(a) asm ("pkwb %a0,%v0", a)
- #define unpkbl(a) asm ("unpkbl %a0,%v0", a)
- #define unpkbw(a) asm ("unpkbw %a0,%v0", a)
-+#define wh64(a) asm ("wh64 %a0", a)
-
- #else
- #error "Unknown compiler!"
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/dsputil_alpha.c avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/dsputil_alpha.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/dsputil_alpha.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/dsputil_alpha.c 2003-09-28 17:26:39.000000000 +0200
-@@ -0,0 +1,364 @@
-+/*
-+ * Alpha optimized DSP utils
-+ * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "asm.h"
-+#include "../dsputil.h"
-+
-+extern void simple_idct_axp(DCTELEM *block);
-+extern void simple_idct_put_axp(uint8_t *dest, int line_size, DCTELEM *block);
-+extern void simple_idct_add_axp(uint8_t *dest, int line_size, DCTELEM *block);
-+
-+void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
-+ int line_size, int h);
-+void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
-+ int line_size);
-+void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
-+ int line_size);
-+void (*put_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
-+ int line_size);
-+void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
-+ int line_size);
-+
-+void get_pixels_mvi(DCTELEM *restrict block,
-+ const uint8_t *restrict pixels, int line_size);
-+void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
-+ int stride);
-+int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
-+int pix_abs16x16_mvi_asm(uint8_t *pix1, uint8_t *pix2, int line_size);
-+int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
-+int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
-+int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
-+
-+#if 0
-+/* These functions were the base for the optimized assembler routines,
-+ and remain here for documentation purposes. */
-+static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
-+ int line_size)
-+{
-+ int i = 8;
-+ uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
-+
-+ do {
-+ uint64_t shorts0, shorts1;
-+
-+ shorts0 = ldq(block);
-+ shorts0 = maxsw4(shorts0, 0);
-+ shorts0 = minsw4(shorts0, clampmask);
-+ stl(pkwb(shorts0), pixels);
-+
-+ shorts1 = ldq(block + 4);
-+ shorts1 = maxsw4(shorts1, 0);
-+ shorts1 = minsw4(shorts1, clampmask);
-+ stl(pkwb(shorts1), pixels + 4);
-+
-+ pixels += line_size;
-+ block += 8;
-+ } while (--i);
-+}
-+
-+void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
-+ int line_size)
-+{
-+ int h = 8;
-+ /* Keep this function a leaf function by generating the constants
-+ manually (mainly for the hack value ;-). */
-+ uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
-+ uint64_t signmask = zap(-1, 0x33);
-+ signmask ^= signmask >> 1; /* 0x8000800080008000 */
-+
-+ do {
-+ uint64_t shorts0, pix0, signs0;
-+ uint64_t shorts1, pix1, signs1;
-+
-+ shorts0 = ldq(block);
-+ shorts1 = ldq(block + 4);
-+
-+ pix0 = unpkbw(ldl(pixels));
-+ /* Signed subword add (MMX paddw). */
-+ signs0 = shorts0 & signmask;
-+ shorts0 &= ~signmask;
-+ shorts0 += pix0;
-+ shorts0 ^= signs0;
-+ /* Clamp. */
-+ shorts0 = maxsw4(shorts0, 0);
-+ shorts0 = minsw4(shorts0, clampmask);
-+
-+ /* Next 4. */
-+ pix1 = unpkbw(ldl(pixels + 4));
-+ signs1 = shorts1 & signmask;
-+ shorts1 &= ~signmask;
-+ shorts1 += pix1;
-+ shorts1 ^= signs1;
-+ shorts1 = maxsw4(shorts1, 0);
-+ shorts1 = minsw4(shorts1, clampmask);
-+
-+ stl(pkwb(shorts0), pixels);
-+ stl(pkwb(shorts1), pixels + 4);
-+
-+ pixels += line_size;
-+ block += 8;
-+ } while (--h);
-+}
-+#endif
-+
-+static void clear_blocks_axp(DCTELEM *blocks) {
-+ uint64_t *p = (uint64_t *) blocks;
-+ int n = sizeof(DCTELEM) * 6 * 64;
-+
-+ do {
-+ p[0] = 0;
-+ p[1] = 0;
-+ p[2] = 0;
-+ p[3] = 0;
-+ p[4] = 0;
-+ p[5] = 0;
-+ p[6] = 0;
-+ p[7] = 0;
-+ p += 8;
-+ n -= 8 * 8;
-+ } while (n);
-+}
-+
-+static inline uint64_t avg2_no_rnd(uint64_t a, uint64_t b)
-+{
-+ return (a & b) + (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
-+}
-+
-+static inline uint64_t avg2(uint64_t a, uint64_t b)
-+{
-+ return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
-+}
-+
-+#if 0
-+/* The XY2 routines basically utilize this scheme, but reuse parts in
-+ each iteration. */
-+static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
-+{
-+ uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
-+ + ((l2 & ~BYTE_VEC(0x03)) >> 2)
-+ + ((l3 & ~BYTE_VEC(0x03)) >> 2)
-+ + ((l4 & ~BYTE_VEC(0x03)) >> 2);
-+ uint64_t r2 = (( (l1 & BYTE_VEC(0x03))
-+ + (l2 & BYTE_VEC(0x03))
-+ + (l3 & BYTE_VEC(0x03))
-+ + (l4 & BYTE_VEC(0x03))
-+ + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
-+ return r1 + r2;
-+}
-+#endif
-+
-+#define OP(LOAD, STORE) \
-+ do { \
-+ STORE(LOAD(pixels), block); \
-+ pixels += line_size; \
-+ block += line_size; \
-+ } while (--h)
-+
-+#define OP_X2(LOAD, STORE) \
-+ do { \
-+ uint64_t pix1, pix2; \
-+ \
-+ pix1 = LOAD(pixels); \
-+ pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56); \
-+ STORE(AVG2(pix1, pix2), block); \
-+ pixels += line_size; \
-+ block += line_size; \
-+ } while (--h)
-+
-+#define OP_Y2(LOAD, STORE) \
-+ do { \
-+ uint64_t pix = LOAD(pixels); \
-+ do { \
-+ uint64_t next_pix; \
-+ \
-+ pixels += line_size; \
-+ next_pix = LOAD(pixels); \
-+ STORE(AVG2(pix, next_pix), block); \
-+ block += line_size; \
-+ pix = next_pix; \
-+ } while (--h); \
-+ } while (0)
-+
-+#define OP_XY2(LOAD, STORE) \
-+ do { \
-+ uint64_t pix1 = LOAD(pixels); \
-+ uint64_t pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56); \
-+ uint64_t pix_l = (pix1 & BYTE_VEC(0x03)) \
-+ + (pix2 & BYTE_VEC(0x03)); \
-+ uint64_t pix_h = ((pix1 & ~BYTE_VEC(0x03)) >> 2) \
-+ + ((pix2 & ~BYTE_VEC(0x03)) >> 2); \
-+ \
-+ do { \
-+ uint64_t npix1, npix2; \
-+ uint64_t npix_l, npix_h; \
-+ uint64_t avg; \
-+ \
-+ pixels += line_size; \
-+ npix1 = LOAD(pixels); \
-+ npix2 = npix1 >> 8 | ((uint64_t) pixels[8] << 56); \
-+ npix_l = (npix1 & BYTE_VEC(0x03)) \
-+ + (npix2 & BYTE_VEC(0x03)); \
-+ npix_h = ((npix1 & ~BYTE_VEC(0x03)) >> 2) \
-+ + ((npix2 & ~BYTE_VEC(0x03)) >> 2); \
-+ avg = (((pix_l + npix_l + AVG4_ROUNDER) >> 2) & BYTE_VEC(0x03)) \
-+ + pix_h + npix_h; \
-+ STORE(avg, block); \
-+ \
-+ block += line_size; \
-+ pix_l = npix_l; \
-+ pix_h = npix_h; \
-+ } while (--h); \
-+ } while (0)
-+
-+#define MAKE_OP(OPNAME, SUFF, OPKIND, STORE) \
-+static void OPNAME ## _pixels ## SUFF ## _axp \
-+ (uint8_t *restrict block, const uint8_t *restrict pixels, \
-+ int line_size, int h) \
-+{ \
-+ if ((size_t) pixels & 0x7) { \
-+ OPKIND(uldq, STORE); \
-+ } else { \
-+ OPKIND(ldq, STORE); \
-+ } \
-+} \
-+ \
-+static void OPNAME ## _pixels16 ## SUFF ## _axp \
-+ (uint8_t *restrict block, const uint8_t *restrict pixels, \
-+ int line_size, int h) \
-+{ \
-+ OPNAME ## _pixels ## SUFF ## _axp(block, pixels, line_size, h); \
-+ OPNAME ## _pixels ## SUFF ## _axp(block + 8, pixels + 8, line_size, h); \
-+}
-+
-+#define PIXOP(OPNAME, STORE) \
-+ MAKE_OP(OPNAME, , OP, STORE) \
-+ MAKE_OP(OPNAME, _x2, OP_X2, STORE) \
-+ MAKE_OP(OPNAME, _y2, OP_Y2, STORE) \
-+ MAKE_OP(OPNAME, _xy2, OP_XY2, STORE)
-+
-+/* Rounding primitives. */
-+#define AVG2 avg2
-+#define AVG4 avg4
-+#define AVG4_ROUNDER BYTE_VEC(0x02)
-+#define STORE(l, b) stq(l, b)
-+PIXOP(put, STORE);
-+
-+#undef STORE
-+#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
-+PIXOP(avg, STORE);
-+
-+/* Not rounding primitives. */
-+#undef AVG2
-+#undef AVG4
-+#undef AVG4_ROUNDER
-+#undef STORE
-+#define AVG2 avg2_no_rnd
-+#define AVG4 avg4_no_rnd
-+#define AVG4_ROUNDER BYTE_VEC(0x01)
-+#define STORE(l, b) stq(l, b)
-+PIXOP(put_no_rnd, STORE);
-+
-+#undef STORE
-+#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
-+PIXOP(avg_no_rnd, STORE);
-+
-+void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
-+ int line_size, int h)
-+{
-+ put_pixels_axp_asm(block, pixels, line_size, h);
-+ put_pixels_axp_asm(block + 8, pixels + 8, line_size, h);
-+}
-+
-+static int sad16x16_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
-+{
-+ return pix_abs16x16_mvi_asm(a, b, stride);
-+}
-+
-+static int sad8x8_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
-+{
-+ return pix_abs8x8_mvi(a, b, stride);
-+}
-+
-+void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
-+{
-+ c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
-+ c->put_pixels_tab[0][1] = put_pixels16_x2_axp;
-+ c->put_pixels_tab[0][2] = put_pixels16_y2_axp;
-+ c->put_pixels_tab[0][3] = put_pixels16_xy2_axp;
-+
-+ c->put_no_rnd_pixels_tab[0][0] = put_pixels16_axp_asm;
-+ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_axp;
-+ c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_axp;
-+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_axp;
-+
-+ c->avg_pixels_tab[0][0] = avg_pixels16_axp;
-+ c->avg_pixels_tab[0][1] = avg_pixels16_x2_axp;
-+ c->avg_pixels_tab[0][2] = avg_pixels16_y2_axp;
-+ c->avg_pixels_tab[0][3] = avg_pixels16_xy2_axp;
-+
-+ c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_axp;
-+ c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_axp;
-+ c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_axp;
-+ c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_axp;
-+
-+ c->put_pixels_tab[1][0] = put_pixels_axp_asm;
-+ c->put_pixels_tab[1][1] = put_pixels_x2_axp;
-+ c->put_pixels_tab[1][2] = put_pixels_y2_axp;
-+ c->put_pixels_tab[1][3] = put_pixels_xy2_axp;
-+
-+ c->put_no_rnd_pixels_tab[1][0] = put_pixels_axp_asm;
-+ c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels_x2_axp;
-+ c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels_y2_axp;
-+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels_xy2_axp;
-+
-+ c->avg_pixels_tab[1][0] = avg_pixels_axp;
-+ c->avg_pixels_tab[1][1] = avg_pixels_x2_axp;
-+ c->avg_pixels_tab[1][2] = avg_pixels_y2_axp;
-+ c->avg_pixels_tab[1][3] = avg_pixels_xy2_axp;
-+
-+ c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels_axp;
-+ c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels_x2_axp;
-+ c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels_y2_axp;
-+ c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels_xy2_axp;
-+
-+ c->clear_blocks = clear_blocks_axp;
-+
-+ /* amask clears all bits that correspond to present features. */
-+ if (amask(AMASK_MVI) == 0) {
-+ c->put_pixels_clamped = put_pixels_clamped_mvi_asm;
-+ c->add_pixels_clamped = add_pixels_clamped_mvi_asm;
-+
-+ c->get_pixels = get_pixels_mvi;
-+ c->diff_pixels = diff_pixels_mvi;
-+ c->sad[0] = sad16x16_mvi;
-+ c->sad[1] = sad8x8_mvi;
-+ c->pix_abs8x8 = pix_abs8x8_mvi;
-+ c->pix_abs16x16 = pix_abs16x16_mvi_asm;
-+ c->pix_abs16x16_x2 = pix_abs16x16_x2_mvi;
-+ c->pix_abs16x16_y2 = pix_abs16x16_y2_mvi;
-+ c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mvi;
-+ }
-+
-+ put_pixels_clamped_axp_p = c->put_pixels_clamped;
-+ add_pixels_clamped_axp_p = c->add_pixels_clamped;
-+
-+ c->idct_put = simple_idct_put_axp;
-+ c->idct_add = simple_idct_add_axp;
-+ c->idct = simple_idct_axp;
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/Makefile.am avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/Makefile.am
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/Makefile.am 2003-05-25 23:07:42.000000000 +0200
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/Makefile.am 2003-11-13 23:51:25.426454176 +0100
-@@ -7,10 +7,12 @@
+--- avifile-0.7-0.7.41/ffmpeg/libavcodec/alpha/Makefile.am.orig 2004-03-09 12:24:34.000000000 +0100
++++ avifile-0.7-0.7.41/ffmpeg/libavcodec/alpha/Makefile.am 2004-12-05 01:36:57.171071648 +0100
+@@ -6,12 +6,12 @@
dsputil_alpha.c \
motion_est_alpha.c \
mpegvideo_alpha.c \
+ simple_idct_alpha.c \
+ dsputil_alpha_asm.S \
+ motion_est_mvi_asm.S
- endif
-
--noinst_HEADERS = asm.h dsputil_alpha_asm.S regdef.h motion_est_mvi_asm.S
-+noinst_HEADERS = asm.h regdef.h
-
- libavcodecalpha_la_SOURCES = $(ALPHA_SRC)
-
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/motion_est_alpha.c avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/motion_est_alpha.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/motion_est_alpha.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/motion_est_alpha.c 2003-09-28 17:26:39.000000000 +0200
-@@ -0,0 +1,347 @@
-+/*
-+ * Alpha optimized DSP utils
-+ * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "asm.h"
-+#include "../dsputil.h"
-+
-+void get_pixels_mvi(DCTELEM *restrict block,
-+ const uint8_t *restrict pixels, int line_size)
-+{
-+ int h = 8;
-+
-+ do {
-+ uint64_t p;
-+
-+ p = ldq(pixels);
-+ stq(unpkbw(p), block);
-+ stq(unpkbw(p >> 32), block + 4);
-+
-+ pixels += line_size;
-+ block += 8;
-+ } while (--h);
-+}
-+
-+void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
-+ int stride) {
-+ int h = 8;
-+ uint64_t mask = 0x4040;
-+
-+ mask |= mask << 16;
-+ mask |= mask << 32;
-+ do {
-+ uint64_t x, y, c, d, a;
-+ uint64_t signs;
-+
-+ x = ldq(s1);
-+ y = ldq(s2);
-+ c = cmpbge(x, y);
-+ d = x - y;
-+ a = zap(mask, c); /* We use 0x4040404040404040 here... */
-+ d += 4 * a; /* ...so we can use s4addq here. */
-+ signs = zap(-1, c);
-+
-+ stq(unpkbw(d) | (unpkbw(signs) << 8), block);
-+ stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);
-+
-+ s1 += stride;
-+ s2 += stride;
-+ block += 8;
-+ } while (--h);
-+}
-+
-+static inline uint64_t avg2(uint64_t a, uint64_t b)
-+{
-+ return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
-+}
-+
-+static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
-+{
-+ uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
-+ + ((l2 & ~BYTE_VEC(0x03)) >> 2)
-+ + ((l3 & ~BYTE_VEC(0x03)) >> 2)
-+ + ((l4 & ~BYTE_VEC(0x03)) >> 2);
-+ uint64_t r2 = (( (l1 & BYTE_VEC(0x03))
-+ + (l2 & BYTE_VEC(0x03))
-+ + (l3 & BYTE_VEC(0x03))
-+ + (l4 & BYTE_VEC(0x03))
-+ + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
-+ return r1 + r2;
-+}
-+
-+int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int result = 0;
-+ int h = 8;
-+
-+ if ((size_t) pix2 & 0x7) {
-+ /* works only when pix2 is actually unaligned */
-+ do { /* do 8 pixels at a time */
-+ uint64_t p1, p2;
-+
-+ p1 = ldq(pix1);
-+ p2 = uldq(pix2);
-+ result += perr(p1, p2);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ } while (--h);
-+ } else {
-+ do {
-+ uint64_t p1, p2;
-+
-+ p1 = ldq(pix1);
-+ p2 = ldq(pix2);
-+ result += perr(p1, p2);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ } while (--h);
-+ }
-+
-+ return result;
-+}
-+
-+#if 0 /* now done in assembly */
-+int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int result = 0;
-+ int h = 16;
-+
-+ if ((size_t) pix2 & 0x7) {
-+ /* works only when pix2 is actually unaligned */
-+ do { /* do 16 pixels at a time */
-+ uint64_t p1_l, p1_r, p2_l, p2_r;
-+ uint64_t t;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ t = ldq_u(pix2 + 8);
-+ p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
-+ p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
-+ pix1 += line_size;
-+ pix2 += line_size;
-+
-+ result += perr(p1_l, p2_l)
-+ + perr(p1_r, p2_r);
-+ } while (--h);
-+ } else {
-+ do {
-+ uint64_t p1_l, p1_r, p2_l, p2_r;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ p2_l = ldq(pix2);
-+ p2_r = ldq(pix2 + 8);
-+ pix1 += line_size;
-+ pix2 += line_size;
-+
-+ result += perr(p1_l, p2_l)
-+ + perr(p1_r, p2_r);
-+ } while (--h);
-+ }
-+
-+ return result;
-+}
-+#endif
-+
-+int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int result = 0;
-+ int h = 16;
-+ uint64_t disalign = (size_t) pix2 & 0x7;
-+
-+ switch (disalign) {
-+ case 0:
-+ do {
-+ uint64_t p1_l, p1_r, p2_l, p2_r;
-+ uint64_t l, r;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ l = ldq(pix2);
-+ r = ldq(pix2 + 8);
-+ p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
-+ p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
-+ pix1 += line_size;
-+ pix2 += line_size;
-+
-+ result += perr(p1_l, p2_l)
-+ + perr(p1_r, p2_r);
-+ } while (--h);
-+ break;
-+ case 7:
-+ /* |.......l|lllllllr|rrrrrrr*|
-+ This case is special because disalign1 would be 8, which
-+ gets treated as 0 by extqh. At least it is a bit faster
-+ that way :) */
-+ do {
-+ uint64_t p1_l, p1_r, p2_l, p2_r;
-+ uint64_t l, m, r;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ l = ldq_u(pix2);
-+ m = ldq_u(pix2 + 8);
-+ r = ldq_u(pix2 + 16);
-+ p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
-+ p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
-+ pix1 += line_size;
-+ pix2 += line_size;
-+
-+ result += perr(p1_l, p2_l)
-+ + perr(p1_r, p2_r);
-+ } while (--h);
-+ break;
-+ default:
-+ do {
-+ uint64_t disalign1 = disalign + 1;
-+ uint64_t p1_l, p1_r, p2_l, p2_r;
-+ uint64_t l, m, r;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ l = ldq_u(pix2);
-+ m = ldq_u(pix2 + 8);
-+ r = ldq_u(pix2 + 16);
-+ p2_l = avg2(extql(l, disalign) | extqh(m, disalign),
-+ extql(l, disalign1) | extqh(m, disalign1));
-+ p2_r = avg2(extql(m, disalign) | extqh(r, disalign),
-+ extql(m, disalign1) | extqh(r, disalign1));
-+ pix1 += line_size;
-+ pix2 += line_size;
-+
-+ result += perr(p1_l, p2_l)
-+ + perr(p1_r, p2_r);
-+ } while (--h);
-+ break;
-+ }
-+ return result;
-+}
-+
-+int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int result = 0;
-+ int h = 16;
-+
-+ if ((size_t) pix2 & 0x7) {
-+ uint64_t t, p2_l, p2_r;
-+ t = ldq_u(pix2 + 8);
-+ p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
-+ p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
-+
-+ do {
-+ uint64_t p1_l, p1_r, np2_l, np2_r;
-+ uint64_t t;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ pix2 += line_size;
-+ t = ldq_u(pix2 + 8);
-+ np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
-+ np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
-+
-+ result += perr(p1_l, avg2(p2_l, np2_l))
-+ + perr(p1_r, avg2(p2_r, np2_r));
-+
-+ pix1 += line_size;
-+ p2_l = np2_l;
-+ p2_r = np2_r;
-+
-+ } while (--h);
-+ } else {
-+ uint64_t p2_l, p2_r;
-+ p2_l = ldq(pix2);
-+ p2_r = ldq(pix2 + 8);
-+ do {
-+ uint64_t p1_l, p1_r, np2_l, np2_r;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+ pix2 += line_size;
-+ np2_l = ldq(pix2);
-+ np2_r = ldq(pix2 + 8);
-+
-+ result += perr(p1_l, avg2(p2_l, np2_l))
-+ + perr(p1_r, avg2(p2_r, np2_r));
-+
-+ pix1 += line_size;
-+ p2_l = np2_l;
-+ p2_r = np2_r;
-+ } while (--h);
-+ }
-+ return result;
-+}
-+
-+int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int result = 0;
-+ int h = 16;
-+
-+ uint64_t p1_l, p1_r;
-+ uint64_t p2_l, p2_r, p2_x;
-+
-+ p1_l = ldq(pix1);
-+ p1_r = ldq(pix1 + 8);
-+
-+ if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
-+ p2_l = uldq(pix2);
-+ p2_r = uldq(pix2 + 8);
-+ p2_x = (uint64_t) pix2[16] << 56;
-+ } else {
-+ p2_l = ldq(pix2);
-+ p2_r = ldq(pix2 + 8);
-+ p2_x = ldq(pix2 + 16) << 56;
-+ }
-+
-+ do {
-+ uint64_t np1_l, np1_r;
-+ uint64_t np2_l, np2_r, np2_x;
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+
-+ np1_l = ldq(pix1);
-+ np1_r = ldq(pix1 + 8);
-+
-+ if ((size_t) pix2 & 0x7) { /* could be optimized a lot */
-+ np2_l = uldq(pix2);
-+ np2_r = uldq(pix2 + 8);
-+ np2_x = (uint64_t) pix2[16] << 56;
-+ } else {
-+ np2_l = ldq(pix2);
-+ np2_r = ldq(pix2 + 8);
-+ np2_x = ldq(pix2 + 16) << 56;
-+ }
-+
-+ result += perr(p1_l,
-+ avg4( p2_l, ( p2_l >> 8) | ((uint64_t) p2_r << 56),
-+ np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
-+ + perr(p1_r,
-+ avg4( p2_r, ( p2_r >> 8) | ((uint64_t) p2_x),
-+ np2_r, (np2_r >> 8) | ((uint64_t) np2_x)));
-+
-+ p1_l = np1_l;
-+ p1_r = np1_r;
-+ p2_l = np2_l;
-+ p2_r = np2_r;
-+ p2_x = np2_x;
-+ } while (--h);
-+
-+ return result;
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/mpegvideo_alpha.c avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/mpegvideo_alpha.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/mpegvideo_alpha.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/mpegvideo_alpha.c 2003-09-28 17:26:39.000000000 +0200
-@@ -0,0 +1,96 @@
-+/*
-+ * Alpha optimized DSP utils
-+ * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "asm.h"
-+#include "../dsputil.h"
-+#include "../mpegvideo.h"
-+
-+static void dct_unquantize_h263_axp(MpegEncContext *s, DCTELEM *block,
-+ int n, int qscale)
-+{
-+ int i, n_coeffs;
-+ uint64_t qmul, qadd;
-+ uint64_t correction;
-+ DCTELEM *orig_block = block;
-+ DCTELEM block0;
-+
-+ qadd = WORD_VEC((qscale - 1) | 1);
-+ qmul = qscale << 1;
-+ /* This mask kills spill from negative subwords to the next subword. */
-+ correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */
-+
-+ if (s->mb_intra) {
-+ if (!s->h263_aic) {
-+ if (n < 4)
-+ block0 = block[0] * s->y_dc_scale;
-+ else
-+ block0 = block[0] * s->c_dc_scale;
-+ } else {
-+ qadd = 0;
-+ }
-+ n_coeffs = 63; // does not always use zigzag table
-+ } else {
-+ n_coeffs = s->intra_scantable.raster_end[s->block_last_index[n]];
-+ }
-+
-+ for(i = 0; i <= n_coeffs; block += 4, i += 4) {
-+ uint64_t levels, negmask, zeros, add;
-+
-+ levels = ldq(block);
-+ if (levels == 0)
-+ continue;
-+
-+#ifdef __alpha_max__
-+ /* I don't think the speed difference justifies runtime
-+ detection. */
-+ negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */
-+ negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */
-+#else
-+ negmask = cmpbge(WORD_VEC(0x7fff), levels);
-+ negmask &= (negmask >> 1) | (1 << 7);
-+ negmask = zap(-1, negmask);
-+#endif
-+
-+ zeros = cmpbge(0, levels);
-+ zeros &= zeros >> 1;
-+ /* zeros |= zeros << 1 is not needed since qadd <= 255, so
-+ zapping the lower byte suffices. */
-+
-+ levels *= qmul;
-+ levels -= correction & (negmask << 16);
-+
-+ /* Negate qadd for negative levels. */
-+ add = qadd ^ negmask;
-+ add += WORD_VEC(0x0001) & negmask;
-+ /* Set qadd to 0 for levels == 0. */
-+ add = zap(add, zeros);
-+
-+ levels += add;
-+
-+ stq(levels, block);
-+ }
-+
-+ if (s->mb_intra && !s->h263_aic)
-+ orig_block[0] = block0;
-+}
-+
-+void MPV_common_init_axp(MpegEncContext *s)
-+{
-+ s->dct_unquantize_h263 = dct_unquantize_h263_axp;
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/simple_idct_alpha.c avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/simple_idct_alpha.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha.orig/simple_idct_alpha.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/alpha/simple_idct_alpha.c 2003-09-28 17:26:39.000000000 +0200
-@@ -0,0 +1,311 @@
-+/*
-+ * Simple IDCT (Alpha optimized)
-+ *
-+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ * based upon some commented-out C code from mpeg2dec (idct_mmx.c
-+ * written by Aaron Holtzman <aholtzma@ess.engr.uvic.ca>)
-+ *
-+ * Alpha optimizations by Måns Rullgård <mru@users.sourceforge.net>
-+ * and Falk Hueffner <falk@debian.org>
-+ */
-+
-+#include "asm.h"
-+#include "../dsputil.h"
-+
-+extern void (*put_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
-+ int line_size);
-+extern void (*add_pixels_clamped_axp_p)(const DCTELEM *block, uint8_t *pixels,
-+ int line_size);
-+
-+// cos(i * M_PI / 16) * sqrt(2) * (1 << 14)
-+// W4 is actually exactly 16384, but using 16383 works around
-+// accumulating rounding errors for some encoders
-+#define W1 ((int_fast32_t) 22725)
-+#define W2 ((int_fast32_t) 21407)
-+#define W3 ((int_fast32_t) 19266)
-+#define W4 ((int_fast32_t) 16383)
-+#define W5 ((int_fast32_t) 12873)
-+#define W6 ((int_fast32_t) 8867)
-+#define W7 ((int_fast32_t) 4520)
-+#define ROW_SHIFT 11
-+#define COL_SHIFT 20
-+
-+/* 0: all entries 0, 1: only first entry nonzero, 2: otherwise */
-+static inline int idct_row(DCTELEM *row)
-+{
-+ int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3, t;
-+ uint64_t l, r, t2;
-+ l = ldq(row);
-+ r = ldq(row + 4);
-+
-+ if (l == 0 && r == 0)
-+ return 0;
-+
-+ a0 = W4 * sextw(l) + (1 << (ROW_SHIFT - 1));
-+
-+ if (((l & ~0xffffUL) | r) == 0) {
-+ a0 >>= ROW_SHIFT;
-+ t2 = (uint16_t) a0;
-+ t2 |= t2 << 16;
-+ t2 |= t2 << 32;
-+
-+ stq(t2, row);
-+ stq(t2, row + 4);
-+ return 1;
-+ }
-+
-+ a1 = a0;
-+ a2 = a0;
-+ a3 = a0;
-+
-+ t = extwl(l, 4); /* row[2] */
-+ if (t != 0) {
-+ t = sextw(t);
-+ a0 += W2 * t;
-+ a1 += W6 * t;
-+ a2 -= W6 * t;
-+ a3 -= W2 * t;
-+ }
-+
-+ t = extwl(r, 0); /* row[4] */
-+ if (t != 0) {
-+ t = sextw(t);
-+ a0 += W4 * t;
-+ a1 -= W4 * t;
-+ a2 -= W4 * t;
-+ a3 += W4 * t;
-+ }
-+
-+ t = extwl(r, 4); /* row[6] */
-+ if (t != 0) {
-+ t = sextw(t);
-+ a0 += W6 * t;
-+ a1 -= W2 * t;
-+ a2 += W2 * t;
-+ a3 -= W6 * t;
-+ }
-+
-+ t = extwl(l, 2); /* row[1] */
-+ if (t != 0) {
-+ t = sextw(t);
-+ b0 = W1 * t;
-+ b1 = W3 * t;
-+ b2 = W5 * t;
-+ b3 = W7 * t;
-+ } else {
-+ b0 = 0;
-+ b1 = 0;
-+ b2 = 0;
-+ b3 = 0;
-+ }
-+
-+ t = extwl(l, 6); /* row[3] */
-+ if (t) {
-+ t = sextw(t);
-+ b0 += W3 * t;
-+ b1 -= W7 * t;
-+ b2 -= W1 * t;
-+ b3 -= W5 * t;
-+ }
-+
-+
-+ t = extwl(r, 2); /* row[5] */
-+ if (t) {
-+ t = sextw(t);
-+ b0 += W5 * t;
-+ b1 -= W1 * t;
-+ b2 += W7 * t;
-+ b3 += W3 * t;
-+ }
-+
-+ t = extwl(r, 6); /* row[7] */
-+ if (t) {
-+ t = sextw(t);
-+ b0 += W7 * t;
-+ b1 -= W5 * t;
-+ b2 += W3 * t;
-+ b3 -= W1 * t;
-+ }
-+
-+ row[0] = (a0 + b0) >> ROW_SHIFT;
-+ row[1] = (a1 + b1) >> ROW_SHIFT;
-+ row[2] = (a2 + b2) >> ROW_SHIFT;
-+ row[3] = (a3 + b3) >> ROW_SHIFT;
-+ row[4] = (a3 - b3) >> ROW_SHIFT;
-+ row[5] = (a2 - b2) >> ROW_SHIFT;
-+ row[6] = (a1 - b1) >> ROW_SHIFT;
-+ row[7] = (a0 - b0) >> ROW_SHIFT;
-+
-+ return 2;
-+}
-+
-+static inline void idct_col(DCTELEM *col)
-+{
-+ int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;
-+
-+ col[0] += (1 << (COL_SHIFT - 1)) / W4;
-+
-+ a0 = W4 * col[8 * 0];
-+ a1 = W4 * col[8 * 0];
-+ a2 = W4 * col[8 * 0];
-+ a3 = W4 * col[8 * 0];
-+
-+ if (col[8 * 2]) {
-+ a0 += W2 * col[8 * 2];
-+ a1 += W6 * col[8 * 2];
-+ a2 -= W6 * col[8 * 2];
-+ a3 -= W2 * col[8 * 2];
-+ }
-+
-+ if (col[8 * 4]) {
-+ a0 += W4 * col[8 * 4];
-+ a1 -= W4 * col[8 * 4];
-+ a2 -= W4 * col[8 * 4];
-+ a3 += W4 * col[8 * 4];
-+ }
-+
-+ if (col[8 * 6]) {
-+ a0 += W6 * col[8 * 6];
-+ a1 -= W2 * col[8 * 6];
-+ a2 += W2 * col[8 * 6];
-+ a3 -= W6 * col[8 * 6];
-+ }
-+
-+ if (col[8 * 1]) {
-+ b0 = W1 * col[8 * 1];
-+ b1 = W3 * col[8 * 1];
-+ b2 = W5 * col[8 * 1];
-+ b3 = W7 * col[8 * 1];
-+ } else {
-+ b0 = 0;
-+ b1 = 0;
-+ b2 = 0;
-+ b3 = 0;
-+ }
-+
-+ if (col[8 * 3]) {
-+ b0 += W3 * col[8 * 3];
-+ b1 -= W7 * col[8 * 3];
-+ b2 -= W1 * col[8 * 3];
-+ b3 -= W5 * col[8 * 3];
-+ }
-+
-+ if (col[8 * 5]) {
-+ b0 += W5 * col[8 * 5];
-+ b1 -= W1 * col[8 * 5];
-+ b2 += W7 * col[8 * 5];
-+ b3 += W3 * col[8 * 5];
-+ }
-+
-+ if (col[8 * 7]) {
-+ b0 += W7 * col[8 * 7];
-+ b1 -= W5 * col[8 * 7];
-+ b2 += W3 * col[8 * 7];
-+ b3 -= W1 * col[8 * 7];
-+ }
-+
-+ col[8 * 0] = (a0 + b0) >> COL_SHIFT;
-+ col[8 * 7] = (a0 - b0) >> COL_SHIFT;
-+ col[8 * 1] = (a1 + b1) >> COL_SHIFT;
-+ col[8 * 6] = (a1 - b1) >> COL_SHIFT;
-+ col[8 * 2] = (a2 + b2) >> COL_SHIFT;
-+ col[8 * 5] = (a2 - b2) >> COL_SHIFT;
-+ col[8 * 3] = (a3 + b3) >> COL_SHIFT;
-+ col[8 * 4] = (a3 - b3) >> COL_SHIFT;
-+}
-+
-+/* If all rows but the first one are zero after row transformation,
-+ all rows will be identical after column transformation. */
-+static inline void idct_col2(DCTELEM *col)
-+{
-+ int i;
-+ uint64_t l, r;
-+ uint64_t *lcol = (uint64_t *) col;
-+
-+ for (i = 0; i < 8; ++i) {
-+ int_fast32_t a0 = col[0] + (1 << (COL_SHIFT - 1)) / W4;
-+
-+ a0 *= W4;
-+ col[0] = a0 >> COL_SHIFT;
-+ ++col;
-+ }
-+
-+ l = lcol[0];
-+ r = lcol[1];
-+ lcol[ 2] = l; lcol[ 3] = r;
-+ lcol[ 4] = l; lcol[ 5] = r;
-+ lcol[ 6] = l; lcol[ 7] = r;
-+ lcol[ 8] = l; lcol[ 9] = r;
-+ lcol[10] = l; lcol[11] = r;
-+ lcol[12] = l; lcol[13] = r;
-+ lcol[14] = l; lcol[15] = r;
-+}
-+
-+void simple_idct_axp(DCTELEM *block)
-+{
-+
-+ int i;
-+ int rowsZero = 1; /* all rows except row 0 zero */
-+ int rowsConstant = 1; /* all rows consist of a constant value */
-+
-+ for (i = 0; i < 8; i++) {
-+ int sparseness = idct_row(block + 8 * i);
-+
-+ if (i > 0 && sparseness > 0)
-+ rowsZero = 0;
-+ if (sparseness == 2)
-+ rowsConstant = 0;
-+ }
-+
-+ if (rowsZero) {
-+ idct_col2(block);
-+ } else if (rowsConstant) {
-+ uint64_t *lblock = (uint64_t *) block;
-+
-+ idct_col(block);
-+ for (i = 0; i < 8; i += 2) {
-+ uint64_t v = (uint16_t) block[i * 8];
-+ uint64_t w = (uint16_t) block[i * 8 + 8];
-+
-+ v |= v << 16;
-+ w |= w << 16;
-+ v |= v << 32;
-+ w |= w << 32;
-+ lblock[0] = v;
-+ lblock[1] = v;
-+ lblock[2] = w;
-+ lblock[3] = w;
-+ lblock += 4;
-+ }
-+ } else {
-+ for (i = 0; i < 8; i++)
-+ idct_col(block + i);
-+ }
-+}
-+
-+void simple_idct_put_axp(uint8_t *dest, int line_size, DCTELEM *block)
-+{
-+ simple_idct_axp(block);
-+ put_pixels_clamped_axp_p(block, dest, line_size);
-+}
-+
-+void simple_idct_add_axp(uint8_t *dest, int line_size, DCTELEM *block)
-+{
-+ simple_idct_axp(block);
-+ add_pixels_clamped_axp_p(block, dest, line_size);
-+}
+
+ noinst_HEADERS = $(ALPHA_SOURCES) \
+ asm.h \
+- dsputil_alpha_asm.S \
+- motion_est_mvi_asm.S \
+ regdef.h
+
+ if AMM_FF_ALPHAOPT
--- avifile-0.7-0.7.38/configure.in.orig 2003-07-10 13:15:54.000000000 +0200
+++ avifile-0.7-0.7.38/configure.in 2003-11-14 00:09:16.019699264 +0100
@@ -57,6 +57,7 @@
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/dsputil_altivec.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/dsputil_altivec.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/dsputil_altivec.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/dsputil_altivec.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,1345 @@
-+/*
-+ * Copyright (c) 2002 Brian Foley
-+ * Copyright (c) 2002 Dieter Shirley
-+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "../dsputil.h"
-+
-+#include "gcc_fixes.h"
-+
-+#include "dsputil_altivec.h"
-+
-+#ifdef CONFIG_DARWIN
-+#include <sys/sysctl.h>
-+#else /* CONFIG_DARWIN */
-+#include <signal.h>
-+#include <setjmp.h>
-+
-+static sigjmp_buf jmpbuf;
-+static volatile sig_atomic_t canjump = 0;
-+
-+static void sigill_handler (int sig)
-+{
-+ if (!canjump) {
-+ signal (sig, SIG_DFL);
-+ raise (sig);
-+ }
-+
-+ canjump = 0;
-+ siglongjmp (jmpbuf, 1);
-+}
-+#endif /* CONFIG_DARWIN */
-+
-+int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
-+ vector unsigned char *tv;
-+ vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
-+ vector unsigned int sad;
-+ vector signed int sumdiffs;
-+
-+ s = 0;
-+ sad = (vector unsigned int)vec_splat_u32(0);
-+ for(i=0;i<16;i++) {
-+ /*
-+ Read unaligned pixels into our vectors. The vectors are as follows:
-+ pix1v: pix1[0]-pix1[15]
-+ pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16]
-+ */
-+ tv = (vector unsigned char *) pix1;
-+ pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
-+
-+ tv = (vector unsigned char *) &pix2[0];
-+ pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
-+
-+ tv = (vector unsigned char *) &pix2[1];
-+ pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));
-+
-+ /* Calculate the average vector */
-+ avgv = vec_avg(pix2v, pix2iv);
-+
-+ /* Calculate a sum of abs differences vector */
-+ t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
-+
-+ /* Add each 4 pixel group together and put 4 results into sad */
-+ sad = vec_sum4s(t5, sad);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ }
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
-+ sumdiffs = vec_splat(sumdiffs, 3);
-+ vec_ste(sumdiffs, 0, &s);
-+
-+ return s;
-+}
-+
-+int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
-+ vector unsigned char *tv;
-+ vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
-+ vector unsigned int sad;
-+ vector signed int sumdiffs;
-+ uint8_t *pix3 = pix2 + line_size;
-+
-+ s = 0;
-+ sad = (vector unsigned int)vec_splat_u32(0);
-+
-+ /*
-+ Due to the fact that pix3 = pix2 + line_size, the pix3 of one
-+ iteration becomes pix2 in the next iteration. We can use this
-+ fact to avoid a potentially expensive unaligned read, each
-+ time around the loop.
-+ Read unaligned pixels into our vectors. The vectors are as follows:
-+ pix2v: pix2[0]-pix2[15]
-+ Split the pixel vectors into shorts
-+ */
-+ tv = (vector unsigned char *) &pix2[0];
-+ pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
-+
-+ for(i=0;i<16;i++) {
-+ /*
-+ Read unaligned pixels into our vectors. The vectors are as follows:
-+ pix1v: pix1[0]-pix1[15]
-+ pix3v: pix3[0]-pix3[15]
-+ */
-+ tv = (vector unsigned char *) pix1;
-+ pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
-+
-+ tv = (vector unsigned char *) &pix3[0];
-+ pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));
-+
-+ /* Calculate the average vector */
-+ avgv = vec_avg(pix2v, pix3v);
-+
-+ /* Calculate a sum of abs differences vector */
-+ t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
-+
-+ /* Add each 4 pixel group together and put 4 results into sad */
-+ sad = vec_sum4s(t5, sad);
-+
-+ pix1 += line_size;
-+ pix2v = pix3v;
-+ pix3 += line_size;
-+
-+ }
-+
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
-+ sumdiffs = vec_splat(sumdiffs, 3);
-+ vec_ste(sumdiffs, 0, &s);
-+ return s;
-+}
-+
-+int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ uint8_t *pix3 = pix2 + line_size;
-+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
-+ const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
-+ vector unsigned char *tv, avgv, t5;
-+ vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
-+ vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
-+ vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
-+ vector unsigned short avghv, avglv;
-+ vector unsigned short t1, t2, t3, t4;
-+ vector unsigned int sad;
-+ vector signed int sumdiffs;
-+
-+ sad = (vector unsigned int)vec_splat_u32(0);
-+
-+ s = 0;
-+
-+ /*
-+ Due to the fact that pix3 = pix2 + line_size, the pix3 of one
-+ iteration becomes pix2 in the next iteration. We can use this
-+ fact to avoid a potentially expensive unaligned read, as well
-+ as some splitting, and vector addition each time around the loop.
-+ Read unaligned pixels into our vectors. The vectors are as follows:
-+ pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16]
-+ Split the pixel vectors into shorts
-+ */
-+ tv = (vector unsigned char *) &pix2[0];
-+ pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
-+
-+ tv = (vector unsigned char *) &pix2[1];
-+ pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));
-+
-+ pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
-+ pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
-+ pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
-+ pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
-+ t1 = vec_add(pix2hv, pix2ihv);
-+ t2 = vec_add(pix2lv, pix2ilv);
-+
-+ for(i=0;i<16;i++) {
-+ /*
-+ Read unaligned pixels into our vectors. The vectors are as follows:
-+ pix1v: pix1[0]-pix1[15]
-+ pix3v: pix3[0]-pix3[15] pix3iv: pix3[1]-pix3[16]
-+ */
-+ tv = (vector unsigned char *) pix1;
-+ pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
-+
-+ tv = (vector unsigned char *) &pix3[0];
-+ pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));
-+
-+ tv = (vector unsigned char *) &pix3[1];
-+ pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));
-+
-+ /*
-+ Note that Altivec does have vec_avg, but this works on vector pairs
-+ and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
-+ would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
-+ Instead, we have to split the pixel vectors into vectors of shorts,
-+ and do the averaging by hand.
-+ */
-+
-+ /* Split the pixel vectors into shorts */
-+ pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
-+ pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
-+ pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
-+ pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);
-+
-+ /* Do the averaging on them */
-+ t3 = vec_add(pix3hv, pix3ihv);
-+ t4 = vec_add(pix3lv, pix3ilv);
-+
-+ avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
-+ avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);
-+
-+ /* Pack the shorts back into a result */
-+ avgv = vec_pack(avghv, avglv);
-+
-+ /* Calculate a sum of abs differences vector */
-+ t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
-+
-+ /* Add each 4 pixel group together and put 4 results into sad */
-+ sad = vec_sum4s(t5, sad);
-+
-+ pix1 += line_size;
-+ pix3 += line_size;
-+ /* Transfer the calculated values for pix3 into pix2 */
-+ t1 = t3;
-+ t2 = t4;
-+ }
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
-+ sumdiffs = vec_splat(sumdiffs, 3);
-+ vec_ste(sumdiffs, 0, &s);
-+
-+ return s;
-+}
-+
-+int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-+ vector unsigned char perm1, perm2, *pix1v, *pix2v;
-+ vector unsigned char t1, t2, t3,t4, t5;
-+ vector unsigned int sad;
-+ vector signed int sumdiffs;
-+
-+ sad = (vector unsigned int)vec_splat_u32(0);
-+
-+
-+ for(i=0;i<16;i++) {
-+ /* Read potentially unaligned pixels into t1 and t2 */
-+ perm1 = vec_lvsl(0, pix1);
-+ pix1v = (vector unsigned char *) pix1;
-+ perm2 = vec_lvsl(0, pix2);
-+ pix2v = (vector unsigned char *) pix2;
-+ t1 = vec_perm(pix1v[0], pix1v[1], perm1);
-+ t2 = vec_perm(pix2v[0], pix2v[1], perm2);
-+
-+ /* Calculate a sum of abs differences vector */
-+ t3 = vec_max(t1, t2);
-+ t4 = vec_min(t1, t2);
-+ t5 = vec_sub(t3, t4);
-+
-+ /* Add each 4 pixel group together and put 4 results into sad */
-+ sad = vec_sum4s(t5, sad);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ }
-+
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
-+ sumdiffs = vec_splat(sumdiffs, 3);
-+ vec_ste(sumdiffs, 0, &s);
-+
-+ return s;
-+}
-+
-+int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-+ vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
-+ vector unsigned char t1, t2, t3,t4, t5;
-+ vector unsigned int sad;
-+ vector signed int sumdiffs;
-+
-+ sad = (vector unsigned int)vec_splat_u32(0);
-+
-+ permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
-+
-+ for(i=0;i<8;i++) {
-+ /* Read potentially unaligned pixels into t1 and t2
-+ Since we're reading 16 pixels, and actually only want 8,
-+ mask out the last 8 pixels. The 0s don't change the sum. */
-+ perm1 = vec_lvsl(0, pix1);
-+ pix1v = (vector unsigned char *) pix1;
-+ perm2 = vec_lvsl(0, pix2);
-+ pix2v = (vector unsigned char *) pix2;
-+ t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
-+ t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
-+
-+ /* Calculate a sum of abs differences vector */
-+ t3 = vec_max(t1, t2);
-+ t4 = vec_min(t1, t2);
-+ t5 = vec_sub(t3, t4);
-+
-+ /* Add each 4 pixel group together and put 4 results into sad */
-+ sad = vec_sum4s(t5, sad);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ }
-+
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
-+ sumdiffs = vec_splat(sumdiffs, 3);
-+ vec_ste(sumdiffs, 0, &s);
-+
-+ return s;
-+}
-+
-+int pix_norm1_altivec(uint8_t *pix, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-+ vector unsigned char *tv;
-+ vector unsigned char pixv;
-+ vector unsigned int sv;
-+ vector signed int sum;
-+
-+ sv = (vector unsigned int)vec_splat_u32(0);
-+
-+ s = 0;
-+ for (i = 0; i < 16; i++) {
-+ /* Read in the potentially unaligned pixels */
-+ tv = (vector unsigned char *) pix;
-+ pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));
-+
-+ /* Square the values, and add them to our sum */
-+ sv = vec_msum(pixv, pixv, sv);
-+
-+ pix += line_size;
-+ }
-+ /* Sum up the four partial sums, and put the result into s */
-+ sum = vec_sums((vector signed int) sv, (vector signed int) zero);
-+ sum = vec_splat(sum, 3);
-+ vec_ste(sum, 0, &s);
-+
-+ return s;
-+}
-+
-+/**
-+ * Sum of Squared Errors for an 8x8 block.
-+ * AltiVec-enhanced.
-+ * It's the pix_abs8x8_altivec code above w/ squaring added.
-+ */
-+int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-+ vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
-+ vector unsigned char t1, t2, t3,t4, t5;
-+ vector unsigned int sum;
-+ vector signed int sumsqr;
-+
-+ sum = (vector unsigned int)vec_splat_u32(0);
-+
-+ permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
-+
-+
-+ for(i=0;i<8;i++) {
-+ /* Read potentially unaligned pixels into t1 and t2
-+ Since we're reading 16 pixels, and actually only want 8,
-+ mask out the last 8 pixels. The 0s don't change the sum. */
-+ perm1 = vec_lvsl(0, pix1);
-+ pix1v = (vector unsigned char *) pix1;
-+ perm2 = vec_lvsl(0, pix2);
-+ pix2v = (vector unsigned char *) pix2;
-+ t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
-+ t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
-+
-+ /*
-+ Since we want to use unsigned chars, we can take advantage
-+ of the fact that abs(a-b)^2 = (a-b)^2.
-+ */
-+
-+ /* Calculate abs differences vector */
-+ t3 = vec_max(t1, t2);
-+ t4 = vec_min(t1, t2);
-+ t5 = vec_sub(t3, t4);
-+
-+ /* Square the values and add them to our sum */
-+ sum = vec_msum(t5, t5, sum);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ }
-+
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
-+ sumsqr = vec_splat(sumsqr, 3);
-+ vec_ste(sumsqr, 0, &s);
-+
-+ return s;
-+}
-+
-+/**
-+ * Sum of Squared Errors for a 16x16 block.
-+ * AltiVec-enhanced.
-+ * It's the pix_abs16x16_altivec code above w/ squaring added.
-+ */
-+int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
-+{
-+ int i;
-+ int s __attribute__((aligned(16)));
-+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-+ vector unsigned char perm1, perm2, *pix1v, *pix2v;
-+ vector unsigned char t1, t2, t3,t4, t5;
-+ vector unsigned int sum;
-+ vector signed int sumsqr;
-+
-+ sum = (vector unsigned int)vec_splat_u32(0);
-+
-+ for(i=0;i<16;i++) {
-+ /* Read potentially unaligned pixels into t1 and t2 */
-+ perm1 = vec_lvsl(0, pix1);
-+ pix1v = (vector unsigned char *) pix1;
-+ perm2 = vec_lvsl(0, pix2);
-+ pix2v = (vector unsigned char *) pix2;
-+ t1 = vec_perm(pix1v[0], pix1v[1], perm1);
-+ t2 = vec_perm(pix2v[0], pix2v[1], perm2);
-+
-+ /*
-+ Since we want to use unsigned chars, we can take advantage
-+ of the fact that abs(a-b)^2 = (a-b)^2.
-+ */
-+
-+ /* Calculate abs differences vector */
-+ t3 = vec_max(t1, t2);
-+ t4 = vec_min(t1, t2);
-+ t5 = vec_sub(t3, t4);
-+
-+ /* Square the values and add them to our sum */
-+ sum = vec_msum(t5, t5, sum);
-+
-+ pix1 += line_size;
-+ pix2 += line_size;
-+ }
-+
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
-+ sumsqr = vec_splat(sumsqr, 3);
-+ vec_ste(sumsqr, 0, &s);
-+
-+ return s;
-+}
-+
-+int pix_sum_altivec(uint8_t * pix, int line_size)
-+{
-+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
-+ vector unsigned char perm, *pixv;
-+ vector unsigned char t1;
-+ vector unsigned int sad;
-+ vector signed int sumdiffs;
-+
-+ int i;
-+ int s __attribute__((aligned(16)));
-+
-+ sad = (vector unsigned int)vec_splat_u32(0);
-+
-+ for (i = 0; i < 16; i++) {
-+ /* Read the potentially unaligned 16 pixels into t1 */
-+ perm = vec_lvsl(0, pix);
-+ pixv = (vector unsigned char *) pix;
-+ t1 = vec_perm(pixv[0], pixv[1], perm);
-+
-+ /* Add each 4 pixel group together and put 4 results into sad */
-+ sad = vec_sum4s(t1, sad);
-+
-+ pix += line_size;
-+ }
-+
-+ /* Sum up the four partial sums, and put the result into s */
-+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
-+ sumdiffs = vec_splat(sumdiffs, 3);
-+ vec_ste(sumdiffs, 0, &s);
-+
-+ return s;
-+}
-+
-+void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
-+{
-+ int i;
-+ vector unsigned char perm, bytes, *pixv;
-+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
-+ vector signed short shorts;
-+
-+ for(i=0;i<8;i++)
-+ {
-+ // Read potentially unaligned pixels.
-+ // We're reading 16 pixels, and actually only want 8,
-+ // but we simply ignore the extras.
-+ perm = vec_lvsl(0, pixels);
-+ pixv = (vector unsigned char *) pixels;
-+ bytes = vec_perm(pixv[0], pixv[1], perm);
-+
-+ // convert the bytes into shorts
-+ shorts = (vector signed short)vec_mergeh(zero, bytes);
-+
-+ // save the data to the block, we assume the block is 16-byte aligned
-+ vec_st(shorts, i*16, (vector signed short*)block);
-+
-+ pixels += line_size;
-+ }
-+}
-+
-+void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
-+ const uint8_t *s2, int stride)
-+{
-+ int i;
-+ vector unsigned char perm, bytes, *pixv;
-+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
-+ vector signed short shorts1, shorts2;
-+
-+ for(i=0;i<4;i++)
-+ {
-+ // Read potentially unaligned pixels
-+ // We're reading 16 pixels, and actually only want 8,
-+ // but we simply ignore the extras.
-+ perm = vec_lvsl(0, s1);
-+ pixv = (vector unsigned char *) s1;
-+ bytes = vec_perm(pixv[0], pixv[1], perm);
-+
-+ // convert the bytes into shorts
-+ shorts1 = (vector signed short)vec_mergeh(zero, bytes);
-+
-+ // Do the same for the second block of pixels
-+ perm = vec_lvsl(0, s2);
-+ pixv = (vector unsigned char *) s2;
-+ bytes = vec_perm(pixv[0], pixv[1], perm);
-+
-+ // convert the bytes into shorts
-+ shorts2 = (vector signed short)vec_mergeh(zero, bytes);
-+
-+ // Do the subtraction
-+ shorts1 = vec_sub(shorts1, shorts2);
-+
-+ // save the data to the block, we assume the block is 16-byte aligned
-+ vec_st(shorts1, 0, (vector signed short*)block);
-+
-+ s1 += stride;
-+ s2 += stride;
-+ block += 8;
-+
-+
-+ // The code below is a copy of the code above... This is a manual
-+ // unroll.
-+
-+ // Read potentially unaligned pixels
-+ // We're reading 16 pixels, and actually only want 8,
-+ // but we simply ignore the extras.
-+ perm = vec_lvsl(0, s1);
-+ pixv = (vector unsigned char *) s1;
-+ bytes = vec_perm(pixv[0], pixv[1], perm);
-+
-+ // convert the bytes into shorts
-+ shorts1 = (vector signed short)vec_mergeh(zero, bytes);
-+
-+ // Do the same for the second block of pixels
-+ perm = vec_lvsl(0, s2);
-+ pixv = (vector unsigned char *) s2;
-+ bytes = vec_perm(pixv[0], pixv[1], perm);
-+
-+ // convert the bytes into shorts
-+ shorts2 = (vector signed short)vec_mergeh(zero, bytes);
-+
-+ // Do the subtraction
-+ shorts1 = vec_sub(shorts1, shorts2);
-+
-+ // save the data to the block, we assume the block is 16-byte aligned
-+ vec_st(shorts1, 0, (vector signed short*)block);
-+
-+ s1 += stride;
-+ s2 += stride;
-+ block += 8;
-+ }
-+}
-+
-+int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
-+ return pix_abs16x16_altivec(a,b,stride);
-+}
-+
-+int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
-+ return pix_abs8x8_altivec(a,b,stride);
-+}
-+
-+void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int i;
-+ for(i=0; i+7<w; i+=8){
-+ dst[i+0] += src[i+0];
-+ dst[i+1] += src[i+1];
-+ dst[i+2] += src[i+2];
-+ dst[i+3] += src[i+3];
-+ dst[i+4] += src[i+4];
-+ dst[i+5] += src[i+5];
-+ dst[i+6] += src[i+6];
-+ dst[i+7] += src[i+7];
-+ }
-+ for(; i<w; i++)
-+ dst[i+0] += src[i+0];
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register int i;
-+ register vector unsigned char vdst, vsrc;
-+
-+ /* dst and src are 16 bytes-aligned (guaranteed) */
-+ for(i = 0 ; (i + 15) < w ; i += 16)
-+ {
-+ vdst = vec_ld(i, (unsigned char*)dst);
-+ vsrc = vec_ld(i, (unsigned char*)src);
-+ vdst = vec_add(vsrc, vdst);
-+ vec_st(vdst, i, (unsigned char*)dst);
-+ }
-+ /* if w is not a multiple of 16, handle the remaining bytes scalarly */
-+ for (; (i < w) ; i++)
-+ {
-+ dst[i] += src[i];
-+ }
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 16) == 0) */
-+void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int i;
-+
-+POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
-+
-+ for(i=0; i<h; i++) {
-+ *((uint32_t*)(block )) = (((const struct unaligned_32 *) (pixels))->l);
-+ *((uint32_t*)(block+4)) = (((const struct unaligned_32 *) (pixels+4))->l);
-+ *((uint32_t*)(block+8)) = (((const struct unaligned_32 *) (pixels+8))->l);
-+ *((uint32_t*)(block+12)) = (((const struct unaligned_32 *) (pixels+12))->l);
-+ pixels+=line_size;
-+ block +=line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register vector unsigned char pixelsv1, pixelsv2;
-+ register vector unsigned char pixelsv1B, pixelsv2B;
-+ register vector unsigned char pixelsv1C, pixelsv2C;
-+ register vector unsigned char pixelsv1D, pixelsv2D;
-+
-+ register vector unsigned char perm = vec_lvsl(0, pixels);
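-+ /* perm depends only on the low 4 bits of the 'pixels' address; since
-+ line_size is assumed to be a multiple of 16, that misalignment is
-+ the same on every row, so the permutation vector is computed once. */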
-+ int i;
-+ register int line_size_2 = line_size << 1;
-+ register int line_size_3 = line_size + line_size_2;
-+ register int line_size_4 = line_size << 2;
-+
-+POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
-+// hand-unrolling the loop by 4 gains about 15%
-+// minimum execution time goes from 74 to 60 cycles
-+// it's faster than -funroll-loops, but using
-+// -funroll-loops w/ this is bad - 74 cycles again.
-+// all this is on a 7450, tuning for the 7450
-+#if 0
-+ for(i=0; i<h; i++) {
-+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
-+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
-+ vec_st(vec_perm(pixelsv1, pixelsv2, perm),
-+ 0, (unsigned char*)block);
-+ pixels+=line_size;
-+ block +=line_size;
-+ }
-+#else
-+ for(i=0; i<h; i+=4) {
-+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
-+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
-+ pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
-+ pixelsv2B = vec_ld(16 + line_size, (unsigned char*)pixels);
-+ pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
-+ pixelsv2C = vec_ld(16 + line_size_2, (unsigned char*)pixels);
-+ pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
-+ pixelsv2D = vec_ld(16 + line_size_3, (unsigned char*)pixels);
-+ vec_st(vec_perm(pixelsv1, pixelsv2, perm),
-+ 0, (unsigned char*)block);
-+ vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
-+ line_size, (unsigned char*)block);
-+ vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
-+ line_size_2, (unsigned char*)block);
-+ vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
-+ line_size_3, (unsigned char*)block);
-+ pixels+=line_size_4;
-+ block +=line_size_4;
-+ }
-+#endif
-+POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
-+
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 16) == 0) */
-+#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
-+void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int i;
-+
-+POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
-+
-+ for(i=0; i<h; i++) {
-+ op_avg(*((uint32_t*)(block)),(((const struct unaligned_32 *)(pixels))->l));
-+ op_avg(*((uint32_t*)(block+4)),(((const struct unaligned_32 *)(pixels+4))->l));
-+ op_avg(*((uint32_t*)(block+8)),(((const struct unaligned_32 *)(pixels+8))->l));
-+ op_avg(*((uint32_t*)(block+12)),(((const struct unaligned_32 *)(pixels+12))->l));
-+ pixels+=line_size;
-+ block +=line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
-+ register vector unsigned char perm = vec_lvsl(0, pixels);
-+ int i;
-+
-+POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
-+
-+ for(i=0; i<h; i++) {
-+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
-+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
-+ blockv = vec_ld(0, block);
-+ pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
-+ blockv = vec_avg(blockv,pixelsv);
-+ vec_st(blockv, 0, (unsigned char*)block);
-+ pixels+=line_size;
-+ block +=line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
-+
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 8) == 0) */
-+void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int i;
-+POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
-+ for (i = 0; i < h; i++) {
-+ *((uint32_t *) (block)) =
-+ (((*((uint32_t *) (block))) |
-+ ((((const struct unaligned_32 *) (pixels))->l))) -
-+ ((((*((uint32_t *) (block))) ^
-+ ((((const struct unaligned_32 *) (pixels))->
-+ l))) & 0xFEFEFEFEUL) >> 1));
-+ *((uint32_t *) (block + 4)) =
-+ (((*((uint32_t *) (block + 4))) |
-+ ((((const struct unaligned_32 *) (pixels + 4))->l))) -
-+ ((((*((uint32_t *) (block + 4))) ^
-+ ((((const struct unaligned_32 *) (pixels +
-+ 4))->
-+ l))) & 0xFEFEFEFEUL) >> 1));
-+ pixels += line_size;
-+ block += line_size;
-+ }
-+POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
-+ int i;
-+
-+POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
-+
-+ for (i = 0; i < h; i++) {
-+ /*
-+ block is 8 bytes-aligned, so we're either in the
-+ left block (16 bytes-aligned) or in the right block (not)
-+ */
-+ int rightside = ((unsigned long)block & 0x0000000F);
-+
-+ blockv = vec_ld(0, block);
-+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
-+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
-+ pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
-+
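-+ /* Merge the 8 source pixels into the half of the vector that the
-+ destination occupies, and keep blockv's own bytes in the other
-+ half; vec_avg then averages the wanted 8 bytes while leaving the
-+ other half unchanged (avg(x,x) == x), so the full 16-byte store
-+ below is safe. */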
-+ if (rightside)
-+ {
-+ pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
-+ }
-+ else
-+ {
-+ pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
-+ }
-+
-+ blockv = vec_avg(blockv, pixelsv);
-+
-+ vec_st(blockv, 0, block);
-+
-+ pixels += line_size;
-+ block += line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
-+
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 8) == 0) */
-+void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int j;
-+POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
-+ for (j = 0; j < 2; j++) {
-+ int i;
-+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ const uint32_t b =
-+ (((const struct unaligned_32 *) (pixels + 1))->l);
-+ uint32_t l0 =
-+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
-+ uint32_t h0 =
-+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ uint32_t l1, h1;
-+ pixels += line_size;
-+ for (i = 0; i < h; i += 2) {
-+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
-+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ a = (((const struct unaligned_32 *) (pixels))->l);
-+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
-+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ }
-+ pixels += 4 - line_size * (h + 1);
-+ block += 4 - line_size * h;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register int i;
-+ register vector unsigned char
-+ pixelsv1, pixelsv2,
-+ pixelsavg;
-+ register vector unsigned char
-+ blockv, temp1, temp2;
-+ register vector unsigned short
-+ pixelssum1, pixelssum2, temp3;
-+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
-+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
-+
-+ temp1 = vec_ld(0, pixels);
-+ temp2 = vec_ld(16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
-+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
-+ }
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ pixelssum1 = vec_add(pixelssum1, vctwo);
-+
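-+ /* pixelssum1 now holds src[x] + src[x+1] + 2 (the rounding term) for
-+ the first row, widened to 16 bits. Each loop iteration computes the
-+ same sum for the next row, adds the two and shifts right by 2,
-+ which is the usual (a + b + c + d + 2) >> 2 half-pel average, then
-+ carries the new row sum (plus the rounding term) to the next iteration. */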
-+POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
-+ for (i = 0; i < h ; i++) {
-+ int rightside = ((unsigned long)block & 0x0000000F);
-+ blockv = vec_ld(0, block);
-+
-+ temp1 = vec_ld(line_size, pixels);
-+ temp2 = vec_ld(line_size + 16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
-+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
-+ }
-+
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ temp3 = vec_add(pixelssum1, pixelssum2);
-+ temp3 = vec_sra(temp3, vctwo);
-+ pixelssum1 = vec_add(pixelssum2, vctwo);
-+ pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
-+
-+ if (rightside)
-+ {
-+ blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
-+ }
-+ else
-+ {
-+ blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
-+ }
-+
-+ vec_st(blockv, 0, block);
-+
-+ block += line_size;
-+ pixels += line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 8) == 0) */
-+void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int j;
-+POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
-+ for (j = 0; j < 2; j++) {
-+ int i;
-+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ const uint32_t b =
-+ (((const struct unaligned_32 *) (pixels + 1))->l);
-+ uint32_t l0 =
-+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
-+ uint32_t h0 =
-+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ uint32_t l1, h1;
-+ pixels += line_size;
-+ for (i = 0; i < h; i += 2) {
-+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
-+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ a = (((const struct unaligned_32 *) (pixels))->l);
-+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
-+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ }
-+ pixels += 4 - line_size * (h + 1);
-+ block += 4 - line_size * h;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register int i;
-+ register vector unsigned char
-+ pixelsv1, pixelsv2,
-+ pixelsavg;
-+ register vector unsigned char
-+ blockv, temp1, temp2;
-+ register vector unsigned short
-+ pixelssum1, pixelssum2, temp3;
-+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
-+ register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
-+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
-+
-+ temp1 = vec_ld(0, pixels);
-+ temp2 = vec_ld(16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
-+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
-+ }
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ pixelssum1 = vec_add(pixelssum1, vcone);
-+
-+POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
-+ for (i = 0; i < h ; i++) {
-+ int rightside = ((unsigned long)block & 0x0000000F);
-+ blockv = vec_ld(0, block);
-+
-+ temp1 = vec_ld(line_size, pixels);
-+ temp2 = vec_ld(line_size + 16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
-+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
-+ }
-+
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ temp3 = vec_add(pixelssum1, pixelssum2);
-+ temp3 = vec_sra(temp3, vctwo);
-+ pixelssum1 = vec_add(pixelssum2, vcone);
-+ pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
-+
-+ if (rightside)
-+ {
-+ blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
-+ }
-+ else
-+ {
-+ blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
-+ }
-+
-+ vec_st(blockv, 0, block);
-+
-+ block += line_size;
-+ pixels += line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 16) == 0) */
-+void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int j;
-+POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
-+ for (j = 0; j < 4; j++) {
-+ int i;
-+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ const uint32_t b =
-+ (((const struct unaligned_32 *) (pixels + 1))->l);
-+ uint32_t l0 =
-+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
-+ uint32_t h0 =
-+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ uint32_t l1, h1;
-+ pixels += line_size;
-+ for (i = 0; i < h; i += 2) {
-+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
-+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ a = (((const struct unaligned_32 *) (pixels))->l);
-+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
-+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ }
-+ pixels += 4 - line_size * (h + 1);
-+ block += 4 - line_size * h;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register int i;
-+ register vector unsigned char
-+ pixelsv1, pixelsv2, pixelsv3, pixelsv4;
-+ register vector unsigned char
-+ blockv, temp1, temp2;
-+ register vector unsigned short
-+ pixelssum1, pixelssum2, temp3,
-+ pixelssum3, pixelssum4, temp4;
-+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
-+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
-+
-+POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
-+
-+ temp1 = vec_ld(0, pixels);
-+ temp2 = vec_ld(16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
-+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
-+ }
-+ pixelsv3 = vec_mergel(vczero, pixelsv1);
-+ pixelsv4 = vec_mergel(vczero, pixelsv2);
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+ pixelssum3 = vec_add((vector unsigned short)pixelsv3,
-+ (vector unsigned short)pixelsv4);
-+ pixelssum3 = vec_add(pixelssum3, vctwo);
-+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ pixelssum1 = vec_add(pixelssum1, vctwo);
-+
-+ for (i = 0; i < h ; i++) {
-+ blockv = vec_ld(0, block);
-+
-+ temp1 = vec_ld(line_size, pixels);
-+ temp2 = vec_ld(line_size + 16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
-+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
-+ }
-+
-+ pixelsv3 = vec_mergel(vczero, pixelsv1);
-+ pixelsv4 = vec_mergel(vczero, pixelsv2);
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+
-+ pixelssum4 = vec_add((vector unsigned short)pixelsv3,
-+ (vector unsigned short)pixelsv4);
-+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ temp4 = vec_add(pixelssum3, pixelssum4);
-+ temp4 = vec_sra(temp4, vctwo);
-+ temp3 = vec_add(pixelssum1, pixelssum2);
-+ temp3 = vec_sra(temp3, vctwo);
-+
-+ pixelssum3 = vec_add(pixelssum4, vctwo);
-+ pixelssum1 = vec_add(pixelssum2, vctwo);
-+
-+ blockv = vec_packsu(temp3, temp4);
-+
-+ vec_st(blockv, 0, block);
-+
-+ block += line_size;
-+ pixels += line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+/* next one assumes that ((line_size % 16) == 0) */
-+void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
-+{
-+POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int j;
-+POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-+ for (j = 0; j < 4; j++) {
-+ int i;
-+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ const uint32_t b =
-+ (((const struct unaligned_32 *) (pixels + 1))->l);
-+ uint32_t l0 =
-+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
-+ uint32_t h0 =
-+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ uint32_t l1, h1;
-+ pixels += line_size;
-+ for (i = 0; i < h; i += 2) {
-+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
-+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
-+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ a = (((const struct unaligned_32 *) (pixels))->l);
-+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
-+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
-+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
-+ *((uint32_t *) block) =
-+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
-+ pixels += line_size;
-+ block += line_size;
-+ }
-+ pixels += 4 - line_size * (h + 1);
-+ block += 4 - line_size * h;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ register int i;
-+ register vector unsigned char
-+ pixelsv1, pixelsv2, pixelsv3, pixelsv4;
-+ register vector unsigned char
-+ blockv, temp1, temp2;
-+ register vector unsigned short
-+ pixelssum1, pixelssum2, temp3,
-+ pixelssum3, pixelssum4, temp4;
-+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
-+ register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
-+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
-+
-+POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-+
-+ temp1 = vec_ld(0, pixels);
-+ temp2 = vec_ld(16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
-+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
-+ }
-+ pixelsv3 = vec_mergel(vczero, pixelsv1);
-+ pixelsv4 = vec_mergel(vczero, pixelsv2);
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+ pixelssum3 = vec_add((vector unsigned short)pixelsv3,
-+ (vector unsigned short)pixelsv4);
-+ pixelssum3 = vec_add(pixelssum3, vcone);
-+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ pixelssum1 = vec_add(pixelssum1, vcone);
-+
-+ for (i = 0; i < h ; i++) {
-+ blockv = vec_ld(0, block);
-+
-+ temp1 = vec_ld(line_size, pixels);
-+ temp2 = vec_ld(line_size + 16, pixels);
-+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
-+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
-+ {
-+ pixelsv2 = temp2;
-+ }
-+ else
-+ {
-+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
-+ }
-+
-+ pixelsv3 = vec_mergel(vczero, pixelsv1);
-+ pixelsv4 = vec_mergel(vczero, pixelsv2);
-+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
-+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
-+
-+ pixelssum4 = vec_add((vector unsigned short)pixelsv3,
-+ (vector unsigned short)pixelsv4);
-+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
-+ (vector unsigned short)pixelsv2);
-+ temp4 = vec_add(pixelssum3, pixelssum4);
-+ temp4 = vec_sra(temp4, vctwo);
-+ temp3 = vec_add(pixelssum1, pixelssum2);
-+ temp3 = vec_sra(temp3, vctwo);
-+
-+ pixelssum3 = vec_add(pixelssum4, vcone);
-+ pixelssum1 = vec_add(pixelssum2, vcone);
-+
-+ blockv = vec_packsu(temp3, temp4);
-+
-+ vec_st(blockv, 0, block);
-+
-+ block += line_size;
-+ pixels += line_size;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+int has_altivec(void)
-+{
-+#ifdef CONFIG_DARWIN
-+ int sels[2] = {CTL_HW, HW_VECTORUNIT};
-+ int has_vu = 0;
-+ size_t len = sizeof(has_vu);
-+ int err;
-+
-+ err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
-+
-+ if (err == 0) return (has_vu != 0);
-+#else /* CONFIG_DARWIN */
-+/* no Darwin, do it the brute-force way */
-+/* this is borrowed from the libmpeg2 library */
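-+/* The probe works by installing a SIGILL handler and then executing an
-+ AltiVec instruction (an mtspr to VRSAVE followed by a vand). On a CPU
-+ without AltiVec the instruction traps, the handler installed above
-+ longjmps back so sigsetjmp returns nonzero and we fall through to
-+ return 0; otherwise execution continues and we return 1. */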
-+ {
-+ signal (SIGILL, sigill_handler);
-+ if (sigsetjmp (jmpbuf, 1)) {
-+ signal (SIGILL, SIG_DFL);
-+ } else {
-+ canjump = 1;
-+
-+ asm volatile ("mtspr 256, %0\n\t"
-+ "vand %%v0, %%v0, %%v0"
-+ :
-+ : "r" (-1));
-+
-+ signal (SIGILL, SIG_DFL);
-+ return 1;
-+ }
-+ }
-+#endif /* CONFIG_DARWIN */
-+ return 0;
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/dsputil_ppc.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/dsputil_ppc.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/dsputil_ppc.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/dsputil_ppc.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,307 @@
-+/*
-+ * Copyright (c) 2002 Brian Foley
-+ * Copyright (c) 2002 Dieter Shirley
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "../dsputil.h"
-+
-+#include "dsputil_ppc.h"
-+
-+#ifdef HAVE_ALTIVEC
-+#include "dsputil_altivec.h"
-+#endif
-+
-+extern void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block);
-+extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
-+
-+int mm_flags = 0;
-+
-+int mm_support(void)
-+{
-+ int result = 0;
-+#if HAVE_ALTIVEC
-+ if (has_altivec()) {
-+ result |= MM_ALTIVEC;
-+ }
-+#endif /* HAVE_ALTIVEC */
-+ return result;
-+}
-+
-+#ifdef POWERPC_PERFORMANCE_REPORT
-+unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][powerpc_data_total];
-+/* list below must match enum in dsputil_ppc.h */
-+static unsigned char* perfname[] = {
-+ "fft_calc_altivec",
-+ "gmc1_altivec",
-+ "dct_unquantize_h263_altivec",
-+ "idct_add_altivec",
-+ "idct_put_altivec",
-+ "put_pixels16_altivec",
-+ "avg_pixels16_altivec",
-+ "avg_pixels8_altivec",
-+ "put_pixels8_xy2_altivec",
-+ "put_no_rnd_pixels8_xy2_altivec",
-+ "put_pixels16_xy2_altivec",
-+ "put_no_rnd_pixels16_xy2_altivec",
-+ "clear_blocks_dcbz32_ppc",
-+ "clear_blocks_dcbz128_ppc"
-+};
-+#include <stdio.h>
-+#endif
-+
-+#ifdef POWERPC_PERFORMANCE_REPORT
-+void powerpc_display_perf_report(void)
-+{
-+ int i, j;
-+ fprintf(stderr, "PowerPC performance report\n Values are from the PMC registers, and represent whatever the registers are set to record.\n");
-+ for(i = 0 ; i < powerpc_perf_total ; i++)
-+ {
-+ for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++)
-+ {
-+ if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0)
-+ fprintf(stderr,
-+ " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n",
-+ perfname[i],
-+ j+1,
-+ perfdata[j][i][powerpc_data_min],
-+ perfdata[j][i][powerpc_data_max],
-+ (double)perfdata[j][i][powerpc_data_sum] /
-+ (double)perfdata[j][i][powerpc_data_num],
-+ perfdata[j][i][powerpc_data_num]);
-+ }
-+ }
-+}
-+#endif /* POWERPC_PERFORMANCE_REPORT */
-+
-+/* ***** WARNING ***** WARNING ***** WARNING ***** */
-+/*
-+ clear_blocks_dcbz32_ppc will not work properly
-+ on PowerPC processors with a cache line size
-+ other than 32 bytes.
-+ Fortunately, all processors used by Apple up to
-+ at least the 7450 (aka second-generation G4)
-+ use a 32-byte cache line.
-+ This is a consequence of using the 'dcbz' instruction:
-+ it simply clears a single cache line to zero,
-+ so you need to know the cache line size to use it!
-+ It's absurd, but it's fast...
-+
-+ update 24/06/2003: Apple released the G5 yesterday,
-+ with a PPC970. Cache line size: 128 bytes. Oops.
-+ The semantics of dcbz were changed: it now always clears
-+ only 32 bytes, so the function below will still work, but
-+ will be slow. So I changed check_dcbzl_effect to use dcbzl,
-+ which is defined to clear one full cache line (as dcbz did
-+ before). That way we can still tell the two apart, and use
-+ dcbz (32 bytes) or dcbzl (one cache line) as required.
-+
-+ see <http://developer.apple.com/technotes/tn/tn2087.html>
-+ and <http://developer.apple.com/technotes/tn/tn2086.html>
-+*/
-+void clear_blocks_dcbz32_ppc(DCTELEM *blocks)
-+{
-+POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz32, 1);
-+ register int misal = ((unsigned long)blocks & 0x00000010);
-+ register int i = 0;
-+POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz32, 1);
-+#if 1
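-+ /* 'blocks' is only guaranteed to be 16-byte aligned. When it is not
-+ also 32-byte aligned (misal set), clear the first and last 16 bytes
-+ (four longs each, on 32-bit PowerPC) with plain stores so that every
-+ dcbz in the loop below hits a 32-byte cache line lying entirely
-+ inside the 768-byte block. */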
-+ if (misal) {
-+ ((unsigned long*)blocks)[0] = 0L;
-+ ((unsigned long*)blocks)[1] = 0L;
-+ ((unsigned long*)blocks)[2] = 0L;
-+ ((unsigned long*)blocks)[3] = 0L;
-+ i += 16;
-+ }
-+ for ( ; i < sizeof(DCTELEM)*6*64-31 ; i += 32) {
-+ asm volatile("dcbz %0,%1" : : "b" (blocks), "r" (i) : "memory");
-+ }
-+ if (misal) {
-+ ((unsigned long*)blocks)[188] = 0L;
-+ ((unsigned long*)blocks)[189] = 0L;
-+ ((unsigned long*)blocks)[190] = 0L;
-+ ((unsigned long*)blocks)[191] = 0L;
-+ i += 16;
-+ }
-+#else
-+ memset(blocks, 0, sizeof(DCTELEM)*6*64);
-+#endif
-+POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz32, 1);
-+}
-+
-+/* same as above, when dcbzl clear a whole 128B cache line
-+ i.e. the PPC970 aka G5 */
-+#ifndef NO_DCBZL
-+void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
-+{
-+POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz128, 1);
-+ register int misal = ((unsigned long)blocks & 0x0000007f);
-+ register int i = 0;
-+POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz128, 1);
-+#if 1
-+ if (misal) {
-+ // we could probably also optimize this case,
-+ // but there's not much point as the machines
-+ // aren't available yet (2003-06-26)
-+ memset(blocks, 0, sizeof(DCTELEM)*6*64);
-+ }
-+ else
-+ for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) {
-+ asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory");
-+ }
-+#else
-+ memset(blocks, 0, sizeof(DCTELEM)*6*64);
-+#endif
-+POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz128, 1);
-+}
-+#else
-+void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
-+{
-+ memset(blocks, 0, sizeof(DCTELEM)*6*64);
-+}
-+#endif
-+
-+#ifndef NO_DCBZL
-+/* check how many bytes are set to 0 by one dcbzl */
-+/* update 24/06/2003 : replaced dcbz by dcbzl to get
-+ the intended effect (Apple "fixed" dcbz);
-+ unfortunately this cannot be used unless the assembler
-+ knows about dcbzl ... */
-+long check_dcbzl_effect(void)
-+{
-+ register char *fakedata = (char*)av_malloc(1024);
-+ register char *fakedata_middle;
-+ register long zero = 0;
-+ register long i = 0;
-+ long count = 0;
-+
-+ if (!fakedata)
-+ {
-+ return 0L;
-+ }
-+
-+ fakedata_middle = (fakedata + 512);
-+
-+ memset(fakedata, 0xFF, 1024);
-+
-+ /* below, the constraint "b" seems to mean "address base register"
-+ in gcc-3.3 / RS/6000 speak; it seems to avoid using r0, so.... */
-+ asm volatile("dcbzl %0, %1" : : "b" (fakedata_middle), "r" (zero));
-+
-+ for (i = 0; i < 1024 ; i ++)
-+ {
-+ if (fakedata[i] == (char)0)
-+ count++;
-+ }
-+
-+ av_free(fakedata);
-+
-+ return count;
-+}
-+#else
-+long check_dcbzl_effect(void)
-+{
-+ return 0;
-+}
-+#endif
-+
-+void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
-+{
-+ // Common optimizations whether Altivec is available or not
-+
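-+ // check_dcbzl_effect() probes at run time how many bytes a single
-+ // dcbzl clears (typically 32 on the older G3/G4 cores, 128 on the
-+ // PPC970/G5, 0 if the probe is unavailable) and we pick the matching
-+ // clear_blocks implementation; anything else keeps the default.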
-+ switch (check_dcbzl_effect()) {
-+ case 32:
-+ c->clear_blocks = clear_blocks_dcbz32_ppc;
-+ break;
-+ case 128:
-+ c->clear_blocks = clear_blocks_dcbz128_ppc;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+#if HAVE_ALTIVEC
-+ if (has_altivec()) {
-+ mm_flags |= MM_ALTIVEC;
-+
-+ // Altivec specific optimisations
-+ c->pix_abs16x16_x2 = pix_abs16x16_x2_altivec;
-+ c->pix_abs16x16_y2 = pix_abs16x16_y2_altivec;
-+ c->pix_abs16x16_xy2 = pix_abs16x16_xy2_altivec;
-+ c->pix_abs16x16 = pix_abs16x16_altivec;
-+ c->pix_abs8x8 = pix_abs8x8_altivec;
-+ c->sad[0]= sad16x16_altivec;
-+ c->sad[1]= sad8x8_altivec;
-+ c->pix_norm1 = pix_norm1_altivec;
-+ c->sse[1]= sse8_altivec;
-+ c->sse[0]= sse16_altivec;
-+ c->pix_sum = pix_sum_altivec;
-+ c->diff_pixels = diff_pixels_altivec;
-+ c->get_pixels = get_pixels_altivec;
-+// next one disabled as it's untested.
-+#if 0
-+ c->add_bytes= add_bytes_altivec;
-+#endif /* 0 */
-+ c->put_pixels_tab[0][0] = put_pixels16_altivec;
-+ /* the two functions do the same thing, so use the same code */
-+ c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
-+ c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
-+// next one disabled as it's untested.
-+#if 0
-+ c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
-+#endif /* 0 */
-+ c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
-+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
-+ c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
-+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
-+
-+ c->gmc1 = gmc1_altivec;
-+
-+ if ((avctx->idct_algo == FF_IDCT_AUTO) ||
-+ (avctx->idct_algo == FF_IDCT_ALTIVEC))
-+ {
-+ c->idct_put = idct_put_altivec;
-+ c->idct_add = idct_add_altivec;
-+#ifndef ALTIVEC_USE_REFERENCE_C_CODE
-+ c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ c->idct_permutation_type = FF_NO_IDCT_PERM;
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ }
-+
-+#ifdef POWERPC_PERFORMANCE_REPORT
-+ {
-+ int i, j;
-+ for (i = 0 ; i < powerpc_perf_total ; i++)
-+ {
-+ for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++)
-+ {
-+ perfdata[j][i][powerpc_data_min] = (unsigned long long)0xFFFFFFFFFFFFFFFF;
-+ perfdata[j][i][powerpc_data_max] = (unsigned long long)0x0000000000000000;
-+ perfdata[j][i][powerpc_data_sum] = (unsigned long long)0x0000000000000000;
-+ perfdata[j][i][powerpc_data_num] = (unsigned long long)0x0000000000000000;
-+ }
-+ }
-+ }
-+#endif /* POWERPC_PERFORMANCE_REPORT */
-+ } else
-+#endif /* HAVE_ALTIVEC */
-+ {
-+ // Non-AltiVec PPC optimisations
-+
-+ // ... pending ...
-+ }
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/fft_altivec.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/fft_altivec.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/fft_altivec.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/fft_altivec.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,247 @@
-+/*
-+ * FFT/IFFT transforms
-+ * AltiVec-enabled
-+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
-+ * Based on code Copyright (c) 2002 Fabrice Bellard.
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+#include "../dsputil.h"
-+
-+#include "gcc_fixes.h"
-+
-+#include "dsputil_altivec.h"
-+
-+/*
-+ those three macros are from libavcodec/fft.c
-+ and are required for the reference C code
-+*/
-+/* butterfly op */
-+#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
-+{\
-+ FFTSample ax, ay, bx, by;\
-+ bx=pre1;\
-+ by=pim1;\
-+ ax=qre1;\
-+ ay=qim1;\
-+ pre = (bx + ax);\
-+ pim = (by + ay);\
-+ qre = (bx - ax);\
-+ qim = (by - ay);\
-+}
-+#define MUL16(a,b) ((a) * (b))
-+#define CMUL(pre, pim, are, aim, bre, bim) \
-+{\
-+ pre = (MUL16(are, bre) - MUL16(aim, bim));\
-+ pim = (MUL16(are, bim) + MUL16(bre, aim));\
-+}
-+
-+
-+/**
-+ * Do a complex FFT with the parameters defined in fft_init(). The
-+ * input data must be permuted before with s->revtab table. No
-+ * 1.0/sqrt(n) normalization is done.
-+ * AltiVec-enabled
-+ * This code assumes that the 'z' pointer is 16-byte aligned.
-+ * It also assumes each FFTComplex is an 8-byte-aligned pair of floats.
-+ * The code is exactly the same as the SSE version, except
-+ * that successive MUL + ADD/SUB pairs have been merged into
-+ * fused multiply-adds ('vec_madd' in AltiVec).
-+ */
-+void fft_calc_altivec(FFTContext *s, FFTComplex *z)
-+{
-+POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ int ln = s->nbits;
-+ int j, np, np2;
-+ int nblocks, nloops;
-+ register FFTComplex *p, *q;
-+ FFTComplex *exptab = s->exptab;
-+ int l;
-+ FFTSample tmp_re, tmp_im;
-+
-+POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
-+
-+ np = 1 << ln;
-+
-+ /* pass 0 */
-+
-+ p=&z[0];
-+ j=(np >> 1);
-+ do {
-+ BF(p[0].re, p[0].im, p[1].re, p[1].im,
-+ p[0].re, p[0].im, p[1].re, p[1].im);
-+ p+=2;
-+ } while (--j != 0);
-+
-+ /* pass 1 */
-+
-+
-+ p=&z[0];
-+ j=np >> 2;
-+ if (s->inverse) {
-+ do {
-+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
-+ p[0].re, p[0].im, p[2].re, p[2].im);
-+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
-+ p[1].re, p[1].im, -p[3].im, p[3].re);
-+ p+=4;
-+ } while (--j != 0);
-+ } else {
-+ do {
-+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
-+ p[0].re, p[0].im, p[2].re, p[2].im);
-+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
-+ p[1].re, p[1].im, p[3].im, -p[3].re);
-+ p+=4;
-+ } while (--j != 0);
-+ }
-+ /* pass 2 .. ln-1 */
-+
-+ nblocks = np >> 3;
-+ nloops = 1 << 2;
-+ np2 = np >> 1;
-+ do {
-+ p = z;
-+ q = z + nloops;
-+ for (j = 0; j < nblocks; ++j) {
-+ BF(p->re, p->im, q->re, q->im,
-+ p->re, p->im, q->re, q->im);
-+
-+ p++;
-+ q++;
-+ for(l = nblocks; l < np2; l += nblocks) {
-+ CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
-+ BF(p->re, p->im, q->re, q->im,
-+ p->re, p->im, tmp_re, tmp_im);
-+ p++;
-+ q++;
-+ }
-+
-+ p += nloops;
-+ q += nloops;
-+ }
-+ nblocks = nblocks >> 1;
-+ nloops = nloops << 1;
-+ } while (nblocks != 0);
-+
-+POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+#ifdef CONFIG_DARWIN
-+ register const vector float vczero = (const vector float)(0.);
-+#else
-+ register const vector float vczero = (const vector float){0.,0.,0.,0.};
-+#endif
-+
-+ int ln = s->nbits;
-+ int j, np, np2;
-+ int nblocks, nloops;
-+ register FFTComplex *p, *q;
-+ FFTComplex *cptr, *cptr1;
-+ int k;
-+
-+POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
-+
-+ np = 1 << ln;
-+
-+ {
-+ vector float *r, a, b, a1, c1, c2;
-+
-+ r = (vector float *)&z[0];
-+
-+ c1 = vcii(p,p,n,n);
-+
-+ if (s->inverse)
-+ {
-+ c2 = vcii(p,p,n,p);
-+ }
-+ else
-+ {
-+ c2 = vcii(p,p,p,n);
-+ }
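-+ /* c1 and c2 hold per-element sign patterns (vcii builds vectors of
-+ +1.0 / -1.0 floats): vec_madd(a, c1, b), with b being 'a' with its
-+ two complex halves swapped, computes the pass-0 butterfly
-+ (p0+p1, p0-p1) in a single instruction. c2 plays the same role for
-+ pass 1, where the sign of the multiply-by-i term depends on whether
-+ this is a forward or an inverse transform. */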
-+
-+ j = (np >> 2);
-+ do {
-+ a = vec_ld(0, r);
-+ a1 = vec_ld(sizeof(vector float), r);
-+
-+ b = vec_perm(a,a,vcprmle(1,0,3,2));
-+ a = vec_madd(a,c1,b);
-+ /* do the pass 0 butterfly */
-+
-+ b = vec_perm(a1,a1,vcprmle(1,0,3,2));
-+ b = vec_madd(a1,c1,b);
-+ /* do the pass 0 butterfly */
-+
-+ /* multiply third by -i */
-+ b = vec_perm(b,b,vcprmle(2,3,1,0));
-+
-+ /* do the pass 1 butterfly */
-+ vec_st(vec_madd(b,c2,a), 0, r);
-+ vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);
-+
-+ r += 2;
-+ } while (--j != 0);
-+ }
-+ /* pass 2 .. ln-1 */
-+
-+ nblocks = np >> 3;
-+ nloops = 1 << 2;
-+ np2 = np >> 1;
-+
-+ cptr1 = s->exptab1;
-+ do {
-+ p = z;
-+ q = z + nloops;
-+ j = nblocks;
-+ do {
-+ cptr = cptr1;
-+ k = nloops >> 1;
-+ do {
-+ vector float a,b,c,t1;
-+
-+ a = vec_ld(0, (float*)p);
-+ b = vec_ld(0, (float*)q);
-+
-+ /* complex mul */
-+ c = vec_ld(0, (float*)cptr);
-+ /* cre*re cim*re */
-+ t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
-+ c = vec_ld(sizeof(vector float), (float*)cptr);
-+ /* -cim*im cre*im */
-+ b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);
-+
-+ /* butterfly */
-+ vec_st(vec_add(a,b), 0, (float*)p);
-+ vec_st(vec_sub(a,b), 0, (float*)q);
-+
-+ p += 2;
-+ q += 2;
-+ cptr += 4;
-+ } while (--k);
-+
-+ p += nloops;
-+ q += nloops;
-+ } while (--j);
-+ cptr1 += nloops * 2;
-+ nblocks = nblocks >> 1;
-+ nloops = nloops << 1;
-+ } while (nblocks != 0);
-+
-+POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
-+
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/gcc_fixes.h avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/gcc_fixes.h
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/gcc_fixes.h 2003-07-04 15:40:29.000000000 +0200
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/gcc_fixes.h 2003-09-28 17:26:40.000000000 +0200
-@@ -25,7 +25,7 @@
- * http://gcc.gnu.org/ml/gcc/2003-04/msg00967.html
- */
-
--static inline vector signed char my_vmrglb (vector signed char const A,
-+static inline vector signed char ff_vmrglb (vector signed char const A,
- vector signed char const B)
- {
- static const vector unsigned char lowbyte = {
-@@ -35,7 +35,7 @@
- return vec_perm (A, B, lowbyte);
- }
-
--static inline vector signed short my_vmrglh (vector signed short const A,
-+static inline vector signed short ff_vmrglh (vector signed short const A,
- vector signed short const B)
- {
- static const vector unsigned char lowhalf = {
-@@ -45,7 +45,7 @@
- return vec_perm (A, B, lowhalf);
- }
-
--static inline vector signed int my_vmrglw (vector signed int const A,
-+static inline vector signed int ff_vmrglw (vector signed int const A,
- vector signed int const B)
- {
- static const vector unsigned char lowword = {
-@@ -54,27 +54,27 @@
- };
- return vec_perm (A, B, lowword);
- }
--/*#define my_vmrglb my_vmrglb
--#define my_vmrglh my_vmrglh
--#define my_vmrglw my_vmrglw
-+/*#define ff_vmrglb ff_vmrglb
-+#define ff_vmrglh ff_vmrglh
-+#define ff_vmrglw ff_vmrglw
- */
- #undef vec_mergel
-
- #define vec_mergel(a1, a2) \
- __ch (__bin_args_eq (vector signed char, (a1), vector signed char, (a2)), \
-- ((vector signed char) my_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
-+ ((vector signed char) ff_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
- __ch (__bin_args_eq (vector unsigned char, (a1), vector unsigned char, (a2)), \
-- ((vector unsigned char) my_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
-+ ((vector unsigned char) ff_vmrglb ((vector signed char) (a1), (vector signed char) (a2))), \
- __ch (__bin_args_eq (vector signed short, (a1), vector signed short, (a2)), \
-- ((vector signed short) my_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
-+ ((vector signed short) ff_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
- __ch (__bin_args_eq (vector unsigned short, (a1), vector unsigned short, (a2)), \
-- ((vector unsigned short) my_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
-+ ((vector unsigned short) ff_vmrglh ((vector signed short) (a1), (vector signed short) (a2))), \
- __ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
-- ((vector float) my_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-+ ((vector float) ff_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
- __ch (__bin_args_eq (vector signed int, (a1), vector signed int, (a2)), \
-- ((vector signed int) my_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-+ ((vector signed int) ff_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
- __ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
-- ((vector unsigned int) my_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
-+ ((vector unsigned int) ff_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
- __altivec_link_error_invalid_argument ())))))))
-
- #endif
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/gmc_altivec.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/gmc_altivec.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/gmc_altivec.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/gmc_altivec.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,172 @@
-+/*
-+ * GMC (Global Motion Compensation)
-+ * AltiVec-enabled
-+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "../dsputil.h"
-+
-+#include "gcc_fixes.h"
-+
-+#include "dsputil_altivec.h"
-+
-+/*
-+ AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
-+ to preserve proper dst alignment.
-+*/
-+#define GMC1_PERF_COND (h==8)
-+void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
-+{
-+POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ const int A=(16-x16)*(16-y16);
-+ const int B=( x16)*(16-y16);
-+ const int C=(16-x16)*( y16);
-+ const int D=( x16)*( y16);
-+ int i;
-+
-+POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-+
-+ for(i=0; i<h; i++)
-+ {
-+ dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
-+ dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
-+ dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
-+ dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
-+ dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
-+ dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
-+ dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
-+ dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
-+ dst+= stride;
-+ src+= stride;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-+
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
-+ {rounder, rounder, rounder, rounder,
-+ rounder, rounder, rounder, rounder};
-+ const unsigned short __attribute__ ((aligned(16))) ABCD[8] =
-+ {
-+ (16-x16)*(16-y16), /* A */
-+ ( x16)*(16-y16), /* B */
-+ (16-x16)*( y16), /* C */
-+ ( x16)*( y16), /* D */
-+ 0, 0, 0, 0 /* padding */
-+ };
-+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
-+ register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
-+ register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
-+ register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
-+ int i;
-+ unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
-+ unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
-+
-+
-+POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-+
-+ tempA = vec_ld(0, (unsigned short*)ABCD);
-+ Av = vec_splat(tempA, 0);
-+ Bv = vec_splat(tempA, 1);
-+ Cv = vec_splat(tempA, 2);
-+ Dv = vec_splat(tempA, 3);
-+
-+ rounderV = vec_ld(0, (unsigned short*)rounder_a);
-+
-+ // We'll be able to pick up our 9 char elements
-+ // at src from those 32 bytes.
-+ // We load the first batch here, as inside the loop
-+ // we can reuse 'src+stride' from one iteration
-+ // as the 'src' of the next.
-+ src_0 = vec_ld(0, src);
-+ src_1 = vec_ld(16, src);
-+ srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
-+
-+ if (src_really_odd != 0x0000000F)
-+ { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
-+ srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
-+ }
-+ else
-+ {
-+ srcvB = src_1;
-+ }
-+ srcvA = vec_mergeh(vczero, srcvA);
-+ srcvB = vec_mergeh(vczero, srcvB);
-+
-+ for(i=0; i<h; i++)
-+ {
-+ dst_odd = (unsigned long)dst & 0x0000000F;
-+ src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
-+
-+ dstv = vec_ld(0, dst);
-+
-+ // We'll be able to pick up our 9 char elements
-+ // at src + stride from those 32 bytes,
-+ // then reuse the resulting 2 vectors srcvC and srcvD
-+ // as the next srcvA and srcvB.
-+ src_0 = vec_ld(stride + 0, src);
-+ src_1 = vec_ld(stride + 16, src);
-+ srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
-+
-+ if (src_really_odd != 0x0000000F)
-+ { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
-+ srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
-+ }
-+ else
-+ {
-+ srcvD = src_1;
-+ }
-+
-+ srcvC = vec_mergeh(vczero, srcvC);
-+ srcvD = vec_mergeh(vczero, srcvD);
-+
-+
-+ // OK, now we (finally) do the math :-)
-+ // those four instructions replace 32 int muls & 32 int adds.
-+ // isn't AltiVec nice?
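-+ // Each vec_mladd accumulates one of the four bilinear taps
-+ // (A*src[x], B*src[x+1], C*src[x+stride], D*src[x+stride+1]) for
-+ // 8 pixels at once in 16-bit arithmetic; the vec_sr by 8 below is
-+ // the final '>> 8' of the scalar formula above.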
-+ tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
-+ tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
-+ tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
-+ tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
-+
-+ srcvA = srcvC;
-+ srcvB = srcvD;
-+
-+ tempD = vec_sr(tempD, vcsr8);
-+
-+ dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
-+
-+ if (dst_odd)
-+ {
-+ dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
-+ }
-+ else
-+ {
-+ dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
-+ }
-+
-+ vec_st(dstv2, 0, dst);
-+
-+ dst += stride;
-+ src += stride;
-+ }
-+
-+POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-+
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/idct_altivec.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/idct_altivec.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/idct_altivec.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/idct_altivec.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,245 @@
-+/*
-+ * Copyright (c) 2001 Michel Lespinasse
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ */
-+
-+/*
-+ * NOTE: This code is based on GPL code from the libmpeg2 project. The
-+ * author, Michel Lespinasse, has given explicit permission to release
-+ * under LGPL as part of ffmpeg.
-+ *
-+ */
-+
-+/*
-+ * FFMpeg integration by Dieter Shirley
-+ *
-+ * This file is a direct copy of the altivec idct module from the libmpeg2
-+ * project. I've deleted all of the libmpeg2 specific code, renamed the functions and
-+ * re-ordered the function parameters. The only change to the IDCT function
-+ * itself was to factor out the partial transposition, and to perform a full
-+ * transpose at the end of the function.
-+ */
-+
-+
-+#include <stdlib.h> /* malloc(), free() */
-+#include <string.h>
-+#include "../dsputil.h"
-+
-+#include "gcc_fixes.h"
-+
-+#include "dsputil_altivec.h"
-+
-+#define vector_s16_t vector signed short
-+#define vector_u16_t vector unsigned short
-+#define vector_s8_t vector signed char
-+#define vector_u8_t vector unsigned char
-+#define vector_s32_t vector signed int
-+#define vector_u32_t vector unsigned int
-+
-+#define IDCT_HALF \
-+ /* 1st stage */ \
-+ t1 = vec_mradds (a1, vx7, vx1 ); \
-+ t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
-+ t7 = vec_mradds (a2, vx5, vx3); \
-+ t3 = vec_mradds (ma2, vx3, vx5); \
-+ \
-+ /* 2nd stage */ \
-+ t5 = vec_adds (vx0, vx4); \
-+ t0 = vec_subs (vx0, vx4); \
-+ t2 = vec_mradds (a0, vx6, vx2); \
-+ t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
-+ t6 = vec_adds (t8, t3); \
-+ t3 = vec_subs (t8, t3); \
-+ t8 = vec_subs (t1, t7); \
-+ t1 = vec_adds (t1, t7); \
-+ \
-+ /* 3rd stage */ \
-+ t7 = vec_adds (t5, t2); \
-+ t2 = vec_subs (t5, t2); \
-+ t5 = vec_adds (t0, t4); \
-+ t0 = vec_subs (t0, t4); \
-+ t4 = vec_subs (t8, t3); \
-+ t3 = vec_adds (t8, t3); \
-+ \
-+ /* 4th stage */ \
-+ vy0 = vec_adds (t7, t1); \
-+ vy7 = vec_subs (t7, t1); \
-+ vy1 = vec_mradds (c4, t3, t5); \
-+ vy6 = vec_mradds (mc4, t3, t5); \
-+ vy2 = vec_mradds (c4, t4, t0); \
-+ vy5 = vec_mradds (mc4, t4, t0); \
-+ vy3 = vec_adds (t2, t6); \
-+ vy4 = vec_subs (t2, t6);
-+
-+
-+#define IDCT \
-+ vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
-+ vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
-+ vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
-+ vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
-+ vector_u16_t shift; \
-+ \
-+ c4 = vec_splat (constants[0], 0); \
-+ a0 = vec_splat (constants[0], 1); \
-+ a1 = vec_splat (constants[0], 2); \
-+ a2 = vec_splat (constants[0], 3); \
-+ mc4 = vec_splat (constants[0], 4); \
-+ ma2 = vec_splat (constants[0], 5); \
-+ bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
-+ \
-+ zero = vec_splat_s16 (0); \
-+ shift = vec_splat_u16 (4); \
-+ \
-+ vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
-+ vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
-+ vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
-+ vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
-+ vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
-+ vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
-+ vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
-+ vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
-+ \
-+ IDCT_HALF \
-+ \
-+ vx0 = vec_mergeh (vy0, vy4); \
-+ vx1 = vec_mergel (vy0, vy4); \
-+ vx2 = vec_mergeh (vy1, vy5); \
-+ vx3 = vec_mergel (vy1, vy5); \
-+ vx4 = vec_mergeh (vy2, vy6); \
-+ vx5 = vec_mergel (vy2, vy6); \
-+ vx6 = vec_mergeh (vy3, vy7); \
-+ vx7 = vec_mergel (vy3, vy7); \
-+ \
-+ vy0 = vec_mergeh (vx0, vx4); \
-+ vy1 = vec_mergel (vx0, vx4); \
-+ vy2 = vec_mergeh (vx1, vx5); \
-+ vy3 = vec_mergel (vx1, vx5); \
-+ vy4 = vec_mergeh (vx2, vx6); \
-+ vy5 = vec_mergel (vx2, vx6); \
-+ vy6 = vec_mergeh (vx3, vx7); \
-+ vy7 = vec_mergel (vx3, vx7); \
-+ \
-+ vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
-+ vx1 = vec_mergel (vy0, vy4); \
-+ vx2 = vec_mergeh (vy1, vy5); \
-+ vx3 = vec_mergel (vy1, vy5); \
-+ vx4 = vec_mergeh (vy2, vy6); \
-+ vx5 = vec_mergel (vy2, vy6); \
-+ vx6 = vec_mergeh (vy3, vy7); \
-+ vx7 = vec_mergel (vy3, vy7); \
-+ \
-+ IDCT_HALF \
-+ \
-+ shift = vec_splat_u16 (6); \
-+ vx0 = vec_sra (vy0, shift); \
-+ vx1 = vec_sra (vy1, shift); \
-+ vx2 = vec_sra (vy2, shift); \
-+ vx3 = vec_sra (vy3, shift); \
-+ vx4 = vec_sra (vy4, shift); \
-+ vx5 = vec_sra (vy5, shift); \
-+ vx6 = vec_sra (vy6, shift); \
-+ vx7 = vec_sra (vy7, shift);
-+
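-+/* Overall shape of IDCT: the first IDCT_HALF transforms one dimension of the
-+   pre-scaled coefficients in vx0..vx7, the three mergeh/mergel cascades in the
-+   middle transpose the 8x8 block of 16-bit values (with the bias vector folded
-+   into vx0), the second IDCT_HALF transforms the other dimension, and the final
-+   vec_sra by 6 descales the result. */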
-+
-+static const vector_s16_t constants[5] = {
-+ (vector_s16_t) AVV(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
-+ (vector_s16_t) AVV(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
-+ (vector_s16_t) AVV(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
-+ (vector_s16_t) AVV(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
-+ (vector_s16_t) AVV(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
-+};
-+
-+void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
-+{
-+POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
-+ void simple_idct_put(uint8_t *dest, int line_size, int16_t *block);
-+ simple_idct_put(dest, stride, (int16_t*)block);
-+POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ vector_u8_t tmp;
-+
-+POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
-+
-+ IDCT
-+
-+#define COPY(dest,src) \
-+ tmp = vec_packsu (src, src); \
-+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
-+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
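-+// COPY saturates the eight 16-bit results to unsigned bytes and writes them as two
-+// 32-bit words; vec_ste effectively assumes dest is at least 4-byte aligned.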
-+
-+ COPY (dest, vx0) dest += stride;
-+ COPY (dest, vx1) dest += stride;
-+ COPY (dest, vx2) dest += stride;
-+ COPY (dest, vx3) dest += stride;
-+ COPY (dest, vx4) dest += stride;
-+ COPY (dest, vx5) dest += stride;
-+ COPY (dest, vx6) dest += stride;
-+ COPY (dest, vx7)
-+
-+POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-+void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
-+{
-+POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
-+ void simple_idct_add(uint8_t *dest, int line_size, int16_t *block);
-+ simple_idct_add(dest, stride, (int16_t*)block);
-+POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ vector_u8_t tmp;
-+ vector_s16_t tmp2, tmp3;
-+ vector_u8_t perm0;
-+ vector_u8_t perm1;
-+ vector_u8_t p0, p1, p;
-+
-+POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
-+
-+ IDCT
-+
-+ p0 = vec_lvsl (0, dest);
-+ p1 = vec_lvsl (stride, dest);
-+ p = vec_splat_u8 (-1);
-+ perm0 = vec_mergeh (p, p0);
-+ perm1 = vec_mergeh (p, p1);
-+
-+#define ADD(dest,src,perm) \
-+ /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
-+ tmp = vec_ld (0, dest); \
-+ tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
-+ tmp3 = vec_adds (tmp2, src); \
-+ tmp = vec_packsu (tmp3, tmp3); \
-+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
-+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
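-+// ADD loads the 16 aligned bytes containing the 8 destination pixels, uses the
-+// permute vector (0xff bytes merged with the vec_lvsl offsets) to zero-extend
-+// those pixels into 16-bit values, adds the IDCT output with saturation, then
-+// packs and stores the 8 result bytes with two 4-byte vec_ste stores.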
-+
-+ ADD (dest, vx0, perm0) dest += stride;
-+ ADD (dest, vx1, perm1) dest += stride;
-+ ADD (dest, vx2, perm0) dest += stride;
-+ ADD (dest, vx3, perm1) dest += stride;
-+ ADD (dest, vx4, perm0) dest += stride;
-+ ADD (dest, vx5, perm1) dest += stride;
-+ ADD (dest, vx6, perm0) dest += stride;
-+ ADD (dest, vx7, perm1)
-+
-+POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+}
-+
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/mpegvideo_altivec.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/mpegvideo_altivec.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,645 @@
-+/*
-+ * Copyright (c) 2002 Dieter Shirley
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include <stdlib.h>
-+#include <stdio.h>
-+#include "../dsputil.h"
-+#include "../mpegvideo.h"
-+
-+#include "gcc_fixes.h"
-+
-+#include "dsputil_altivec.h"
-+
-+// Swaps two variables (used for altivec registers)
-+#define SWAP(a,b) \
-+do { \
-+ __typeof__(a) swap_temp=a; \
-+ a=b; \
-+ b=swap_temp; \
-+} while (0)
-+
-+// transposes a matrix consisting of four vectors with four elements each
-+#define TRANSPOSE4(a,b,c,d) \
-+do { \
-+ __typeof__(a) _trans_ach = vec_mergeh(a, c); \
-+ __typeof__(a) _trans_acl = vec_mergel(a, c); \
-+ __typeof__(a) _trans_bdh = vec_mergeh(b, d); \
-+ __typeof__(a) _trans_bdl = vec_mergel(b, d); \
-+ \
-+ a = vec_mergeh(_trans_ach, _trans_bdh); \
-+ b = vec_mergel(_trans_ach, _trans_bdh); \
-+ c = vec_mergeh(_trans_acl, _trans_bdl); \
-+ d = vec_mergel(_trans_acl, _trans_bdl); \
-+} while (0)
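-+// For example, with a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}, c = {c0,c1,c2,c3} and
-+// d = {d0,d1,d2,d3}, the first merge stage produces {a0,c0,a1,c1}, {a2,c2,a3,c3},
-+// {b0,d0,b1,d1} and {b2,d2,b3,d3}, and the second stage leaves a = {a0,b0,c0,d0},
-+// b = {a1,b1,c1,d1}, c = {a2,b2,c2,d2}, d = {a3,b3,c3,d3}, i.e. the transpose.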
-+
-+#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
-+do { \
-+ __typeof__(a) _A1, _B1, _C1, _D1, _E1, _F1, _G1, _H1; \
-+ __typeof__(a) _A2, _B2, _C2, _D2, _E2, _F2, _G2, _H2; \
-+ \
-+ _A1 = vec_mergeh (a, e); \
-+ _B1 = vec_mergel (a, e); \
-+ _C1 = vec_mergeh (b, f); \
-+ _D1 = vec_mergel (b, f); \
-+ _E1 = vec_mergeh (c, g); \
-+ _F1 = vec_mergel (c, g); \
-+ _G1 = vec_mergeh (d, h); \
-+ _H1 = vec_mergel (d, h); \
-+ \
-+ _A2 = vec_mergeh (_A1, _E1); \
-+ _B2 = vec_mergel (_A1, _E1); \
-+ _C2 = vec_mergeh (_B1, _F1); \
-+ _D2 = vec_mergel (_B1, _F1); \
-+ _E2 = vec_mergeh (_C1, _G1); \
-+ _F2 = vec_mergel (_C1, _G1); \
-+ _G2 = vec_mergeh (_D1, _H1); \
-+ _H2 = vec_mergel (_D1, _H1); \
-+ \
-+ a = vec_mergeh (_A2, _E2); \
-+ b = vec_mergel (_A2, _E2); \
-+ c = vec_mergeh (_B2, _F2); \
-+ d = vec_mergel (_B2, _F2); \
-+ e = vec_mergeh (_C2, _G2); \
-+ f = vec_mergel (_C2, _G2); \
-+ g = vec_mergeh (_D2, _H2); \
-+ h = vec_mergel (_D2, _H2); \
-+} while (0)
-+
-+
-+// Loads a four-byte value (int or float) from the target address
-+// into every element in the target vector. Only works if the
-+// target address is four-byte aligned (which it always should be).
-+#define LOAD4(vec, address) \
-+{ \
-+ __typeof__(vec)* _load_addr = (__typeof__(vec)*)(address); \
-+ vector unsigned char _perm_vec = vec_lvsl(0,(address)); \
-+ vec = vec_ld(0, _load_addr); \
-+ vec = vec_perm(vec, vec, _perm_vec); \
-+ vec = vec_splat(vec, 0); \
-+}
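-+// Typical use below: LOAD4(biasInt, biasAddr) splats the 32-bit quantiser bias
-+// across all four elements of biasInt before it is converted to float with vec_ctf.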
-+
-+
-+#ifdef CONFIG_DARWIN
-+#define FOUROF(a) (a)
-+#else
-+// slower, for dumb non-apple GCC
-+#define FOUROF(a) {a,a,a,a}
-+#endif
-+int dct_quantize_altivec(MpegEncContext* s,
-+ DCTELEM* data, int n,
-+ int qscale, int* overflow)
-+{
-+ int lastNonZero;
-+ vector float row0, row1, row2, row3, row4, row5, row6, row7;
-+ vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
-+ const vector float zero = (const vector float)FOUROF(0.);
-+
-+ // Load the data into the row/alt vectors
-+ {
-+ vector signed short data0, data1, data2, data3, data4, data5, data6, data7;
-+
-+ data0 = vec_ld(0, data);
-+ data1 = vec_ld(16, data);
-+ data2 = vec_ld(32, data);
-+ data3 = vec_ld(48, data);
-+ data4 = vec_ld(64, data);
-+ data5 = vec_ld(80, data);
-+ data6 = vec_ld(96, data);
-+ data7 = vec_ld(112, data);
-+
-+ // Transpose the data before we start
-+ TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
-+
-+ // load the data into floating point vectors. We load
-+ // the high half of each row into the main row vectors
-+ // and the low half into the alt vectors.
-+ row0 = vec_ctf(vec_unpackh(data0), 0);
-+ alt0 = vec_ctf(vec_unpackl(data0), 0);
-+ row1 = vec_ctf(vec_unpackh(data1), 0);
-+ alt1 = vec_ctf(vec_unpackl(data1), 0);
-+ row2 = vec_ctf(vec_unpackh(data2), 0);
-+ alt2 = vec_ctf(vec_unpackl(data2), 0);
-+ row3 = vec_ctf(vec_unpackh(data3), 0);
-+ alt3 = vec_ctf(vec_unpackl(data3), 0);
-+ row4 = vec_ctf(vec_unpackh(data4), 0);
-+ alt4 = vec_ctf(vec_unpackl(data4), 0);
-+ row5 = vec_ctf(vec_unpackh(data5), 0);
-+ alt5 = vec_ctf(vec_unpackl(data5), 0);
-+ row6 = vec_ctf(vec_unpackh(data6), 0);
-+ alt6 = vec_ctf(vec_unpackl(data6), 0);
-+ row7 = vec_ctf(vec_unpackh(data7), 0);
-+ alt7 = vec_ctf(vec_unpackl(data7), 0);
-+ }
-+
-+ // The following block could exist as a separate altivec dct
-+ // function. However, if we put it inline, the DCT data can remain
-+ // in the vector local variables, as floats, which we'll use during the
-+ // quantize step...
-+ {
-+ const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f);
-+ const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f);
-+ const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f);
-+ const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f);
-+ const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f);
-+ const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f);
-+ const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f);
-+ const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f);
-+ const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f);
-+ const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f);
-+ const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f);
-+ const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f);
-+
-+
-+ int whichPass, whichHalf;
-+
-+ for(whichPass = 1; whichPass<=2; whichPass++)
-+ {
-+ for(whichHalf = 1; whichHalf<=2; whichHalf++)
-+ {
-+ vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
-+ vector float tmp10, tmp11, tmp12, tmp13;
-+ vector float z1, z2, z3, z4, z5;
-+
-+ tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
-+ tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
-+ tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
-+ tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
-+ tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
-+ tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
-+ tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
-+ tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];
-+
-+ tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
-+ tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
-+ tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
-+ tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;
-+
-+
-+ // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
-+ row0 = vec_add(tmp10, tmp11);
-+
-+ // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
-+ row4 = vec_sub(tmp10, tmp11);
-+
-+
-+ // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
-+ z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);
-+
-+ // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
-+ // CONST_BITS-PASS1_BITS);
-+ row2 = vec_madd(tmp13, vec_0_765366865, z1);
-+
-+ // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
-+ // CONST_BITS-PASS1_BITS);
-+ row6 = vec_madd(tmp12, vec_1_847759065, z1);
-+
-+ z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
-+ z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
-+ z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
-+ z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;
-+
-+ // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
-+ z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);
-+
-+ // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
-+ z3 = vec_madd(z3, vec_1_961570560, z5);
-+
-+ // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
-+ z4 = vec_madd(z4, vec_0_390180644, z5);
-+
-+ // The following adds are rolled into the multiplies above
-+ // z3 = vec_add(z3, z5); // z3 += z5;
-+ // z4 = vec_add(z4, z5); // z4 += z5;
-+
-+ // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
-+ // Wow! It's actually more efficient to roll this multiply
-+ // into the adds below, even though the multiply gets done twice!
-+ // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero);
-+
-+ // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
-+ // Same with this one...
-+ // z1 = vec_madd(z1, vec_0_899976223, (vector float)zero);
-+
-+ // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
-+ // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
-+ row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));
-+
-+ // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
-+ // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
-+ row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));
-+
-+ // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
-+ // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
-+ row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));
-+
-+ // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
-+ // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
-+ row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));
-+
-+ // Swap the row values with the alts. If this is the first half,
-+ // this sets up the low values to be acted on in the second half.
-+ // If this is the second half, it puts the high values back in
-+ // the row values where they are expected to be when we're done.
-+ SWAP(row0, alt0);
-+ SWAP(row1, alt1);
-+ SWAP(row2, alt2);
-+ SWAP(row3, alt3);
-+ SWAP(row4, alt4);
-+ SWAP(row5, alt5);
-+ SWAP(row6, alt6);
-+ SWAP(row7, alt7);
-+ }
-+
-+ if (whichPass == 1)
-+ {
-+ // transpose the data for the second pass
-+
-+ // First, block transpose the upper right with lower left.
-+ SWAP(row4, alt0);
-+ SWAP(row5, alt1);
-+ SWAP(row6, alt2);
-+ SWAP(row7, alt3);
-+
-+ // Now, transpose each block of four
-+ TRANSPOSE4(row0, row1, row2, row3);
-+ TRANSPOSE4(row4, row5, row6, row7);
-+ TRANSPOSE4(alt0, alt1, alt2, alt3);
-+ TRANSPOSE4(alt4, alt5, alt6, alt7);
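-+ // In effect row0..row7 hold the left four columns of the working 8x8
-+ // matrix and alt0..alt7 the right four, so swapping the two off-diagonal
-+ // 4x4 blocks and then transposing each 4x4 block in place (as done above)
-+ // yields the full 8x8 transpose needed for the second pass.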
-+ }
-+ }
-+ }
-+
-+ // used after quantise step
-+ int oldBaseValue = 0;
-+
-+ // perform the quantise step, using the floating point data
-+ // still in the row/alt registers
-+ {
-+ const int* biasAddr;
-+ const vector signed int* qmat;
-+ vector float bias, negBias;
-+
-+ if (s->mb_intra)
-+ {
-+ vector signed int baseVector;
-+
-+ // We must cache element 0 in the intra case
-+ // (it needs special handling).
-+ baseVector = vec_cts(vec_splat(row0, 0), 0);
-+ vec_ste(baseVector, 0, &oldBaseValue);
-+
-+ qmat = (vector signed int*)s->q_intra_matrix[qscale];
-+ biasAddr = &(s->intra_quant_bias);
-+ }
-+ else
-+ {
-+ qmat = (vector signed int*)s->q_inter_matrix[qscale];
-+ biasAddr = &(s->inter_quant_bias);
-+ }
-+
-+ // Load the bias vector (We add 0.5 to the bias so that we're
-+ // rounding when we convert to int, instead of flooring.)
-+ {
-+ vector signed int biasInt;
-+ const vector float negOneFloat = (vector float)FOUROF(-1.0f);
-+ LOAD4(biasInt, biasAddr);
-+ bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
-+ negBias = vec_madd(bias, negOneFloat, zero);
-+ }
-+
-+ {
-+ vector float q0, q1, q2, q3, q4, q5, q6, q7;
-+
-+ q0 = vec_ctf(qmat[0], QMAT_SHIFT);
-+ q1 = vec_ctf(qmat[2], QMAT_SHIFT);
-+ q2 = vec_ctf(qmat[4], QMAT_SHIFT);
-+ q3 = vec_ctf(qmat[6], QMAT_SHIFT);
-+ q4 = vec_ctf(qmat[8], QMAT_SHIFT);
-+ q5 = vec_ctf(qmat[10], QMAT_SHIFT);
-+ q6 = vec_ctf(qmat[12], QMAT_SHIFT);
-+ q7 = vec_ctf(qmat[14], QMAT_SHIFT);
-+
-+ row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
-+ vec_cmpgt(row0, zero));
-+ row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
-+ vec_cmpgt(row1, zero));
-+ row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
-+ vec_cmpgt(row2, zero));
-+ row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
-+ vec_cmpgt(row3, zero));
-+ row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
-+ vec_cmpgt(row4, zero));
-+ row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
-+ vec_cmpgt(row5, zero));
-+ row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
-+ vec_cmpgt(row6, zero));
-+ row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
-+ vec_cmpgt(row7, zero));
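-+ // In the eight selects above (and the matching eight for the alt halves
-+ // below), vec_cmpgt picks vec_madd(row, q, bias) for positive coefficients
-+ // and vec_madd(row, q, negBias) otherwise, so the bias is applied towards
-+ // the sign of each coefficient before vec_cts later truncates towards zero.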
-+
-+ q0 = vec_ctf(qmat[1], QMAT_SHIFT);
-+ q1 = vec_ctf(qmat[3], QMAT_SHIFT);
-+ q2 = vec_ctf(qmat[5], QMAT_SHIFT);
-+ q3 = vec_ctf(qmat[7], QMAT_SHIFT);
-+ q4 = vec_ctf(qmat[9], QMAT_SHIFT);
-+ q5 = vec_ctf(qmat[11], QMAT_SHIFT);
-+ q6 = vec_ctf(qmat[13], QMAT_SHIFT);
-+ q7 = vec_ctf(qmat[15], QMAT_SHIFT);
-+
-+ alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
-+ vec_cmpgt(alt0, zero));
-+ alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
-+ vec_cmpgt(alt1, zero));
-+ alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
-+ vec_cmpgt(alt2, zero));
-+ alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
-+ vec_cmpgt(alt3, zero));
-+ alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
-+ vec_cmpgt(alt4, zero));
-+ alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
-+ vec_cmpgt(alt5, zero));
-+ alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
-+ vec_cmpgt(alt6, zero));
-+ alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
-+ vec_cmpgt(alt7, zero));
-+ }
-+
-+
-+ }
-+
-+ // Store the data back into the original block
-+ {
-+ vector signed short data0, data1, data2, data3, data4, data5, data6, data7;
-+
-+ data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
-+ data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
-+ data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
-+ data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
-+ data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
-+ data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
-+ data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
-+ data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));
-+
-+ {
-+ // Clamp for overflow
-+ vector signed int max_q_int, min_q_int;
-+ vector signed short max_q, min_q;
-+
-+ LOAD4(max_q_int, &(s->max_qcoeff));
-+ LOAD4(min_q_int, &(s->min_qcoeff));
-+
-+ max_q = vec_pack(max_q_int, max_q_int);
-+ min_q = vec_pack(min_q_int, min_q_int);
-+
-+ data0 = vec_max(vec_min(data0, max_q), min_q);
-+ data1 = vec_max(vec_min(data1, max_q), min_q);
-+ data2 = vec_max(vec_min(data2, max_q), min_q);
-+ data3 = vec_max(vec_min(data3, max_q), min_q);
-+ data4 = vec_max(vec_min(data4, max_q), min_q);
-+ data5 = vec_max(vec_min(data5, max_q), min_q);
-+ data6 = vec_max(vec_min(data6, max_q), min_q);
-+ data7 = vec_max(vec_min(data7, max_q), min_q);
-+ }
-+
-+ vector bool char zero_01, zero_23, zero_45, zero_67;
-+ vector signed char scanIndices_01, scanIndices_23, scanIndices_45, scanIndices_67;
-+ vector signed char negOne = vec_splat_s8(-1);
-+ vector signed char* scanPtr =
-+ (vector signed char*)(s->intra_scantable.inverse);
-+
-+ // Determine the largest non-zero index.
-+ zero_01 = vec_pack(vec_cmpeq(data0, (vector short)zero),
-+ vec_cmpeq(data1, (vector short)zero));
-+ zero_23 = vec_pack(vec_cmpeq(data2, (vector short)zero),
-+ vec_cmpeq(data3, (vector short)zero));
-+ zero_45 = vec_pack(vec_cmpeq(data4, (vector short)zero),
-+ vec_cmpeq(data5, (vector short)zero));
-+ zero_67 = vec_pack(vec_cmpeq(data6, (vector short)zero),
-+ vec_cmpeq(data7, (vector short)zero));
-+
-+ // 64 biggest values
-+ scanIndices_01 = vec_sel(scanPtr[0], negOne, zero_01);
-+ scanIndices_23 = vec_sel(scanPtr[1], negOne, zero_23);
-+ scanIndices_45 = vec_sel(scanPtr[2], negOne, zero_45);
-+ scanIndices_67 = vec_sel(scanPtr[3], negOne, zero_67);
-+
-+ // 32 largest values
-+ scanIndices_01 = vec_max(scanIndices_01, scanIndices_23);
-+ scanIndices_45 = vec_max(scanIndices_45, scanIndices_67);
-+
-+ // 16 largest values
-+ scanIndices_01 = vec_max(scanIndices_01, scanIndices_45);
-+
-+ // 8 largest values
-+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
-+ vec_mergel(scanIndices_01, negOne));
-+
-+ // 4 largest values
-+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
-+ vec_mergel(scanIndices_01, negOne));
-+
-+ // 2 largest values
-+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
-+ vec_mergel(scanIndices_01, negOne));
-+
-+ // largest value
-+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
-+ vec_mergel(scanIndices_01, negOne));
-+
-+ scanIndices_01 = vec_splat(scanIndices_01, 0);
-+
-+ signed char lastNonZeroChar;
-+
-+ vec_ste(scanIndices_01, 0, &lastNonZeroChar);
-+
-+ lastNonZero = lastNonZeroChar;
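-+ // Net effect of the reduction above: each coefficient keeps its position
-+ // from the inverse scan table where it is non-zero and becomes -1 where it
-+ // is zero, and the chain of vec_max operations reduces the 64 candidates to
-+ // the largest remaining scan position, i.e. the last non-zero index
-+ // (-1 when the block is empty).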
-+
-+ // While the data is still in vectors, we check for the transpose IDCT permute
-+ // and handle it using the vector unit if we can. This is the permute used
-+ // by the altivec idct, so it is common when using the altivec dct.
-+
-+ if ((lastNonZero > 0) && (s->dsp.idct_permutation_type == FF_TRANSPOSE_IDCT_PERM))
-+ {
-+ TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
-+ }
-+
-+ vec_st(data0, 0, data);
-+ vec_st(data1, 16, data);
-+ vec_st(data2, 32, data);
-+ vec_st(data3, 48, data);
-+ vec_st(data4, 64, data);
-+ vec_st(data5, 80, data);
-+ vec_st(data6, 96, data);
-+ vec_st(data7, 112, data);
-+ }
-+
-+ // special handling of block[0]
-+ if (s->mb_intra)
-+ {
-+ if (!s->h263_aic)
-+ {
-+ if (n < 4)
-+ oldBaseValue /= s->y_dc_scale;
-+ else
-+ oldBaseValue /= s->c_dc_scale;
-+ }
-+
-+ // Divide by 8, rounding the result
-+ data[0] = (oldBaseValue + 4) >> 3;
-+ }
-+
-+ // We handled the transpose permutation above and we don't
-+ // need to permute the "no" permutation case.
-+ if ((lastNonZero > 0) &&
-+ (s->dsp.idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
-+ (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM))
-+ {
-+ ff_block_permute(data, s->dsp.idct_permutation,
-+ s->intra_scantable.scantable, lastNonZero);
-+ }
-+
-+ return lastNonZero;
-+}
-+#undef FOUROF
-+
-+/*
-+ AltiVec version of dct_unquantize_h263
-+ this code assumes `block' is 16 bytes-aligned
-+*/
-+void dct_unquantize_h263_altivec(MpegEncContext *s,
-+ DCTELEM *block, int n, int qscale)
-+{
-+POWERPC_PERF_DECLARE(altivec_dct_unquantize_h263_num, 1);
-+ int i, level, qmul, qadd;
-+ int nCoeffs;
-+
-+ assert(s->block_last_index[n]>=0);
-+
-+POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
-+
-+ qadd = (qscale - 1) | 1;
-+ qmul = qscale << 1;
-+
-+ if (s->mb_intra) {
-+ if (!s->h263_aic) {
-+ if (n < 4)
-+ block[0] = block[0] * s->y_dc_scale;
-+ else
-+ block[0] = block[0] * s->c_dc_scale;
-+ }else
-+ qadd = 0;
-+ i = 1;
-+ nCoeffs= 63; // does not always use the zigzag table
-+ } else {
-+ i = 0;
-+ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
-+ }
-+
-+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-+ for(;i<=nCoeffs;i++) {
-+ level = block[i];
-+ if (level) {
-+ if (level < 0) {
-+ level = level * qmul - qadd;
-+ } else {
-+ level = level * qmul + qadd;
-+ }
-+ block[i] = level;
-+ }
-+ }
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ {
-+ register const vector short vczero = (const vector short)vec_splat_s16(0);
-+ short __attribute__ ((aligned(16))) qmul8[] =
-+ {
-+ qmul, qmul, qmul, qmul,
-+ qmul, qmul, qmul, qmul
-+ };
-+ short __attribute__ ((aligned(16))) qadd8[] =
-+ {
-+ qadd, qadd, qadd, qadd,
-+ qadd, qadd, qadd, qadd
-+ };
-+ short __attribute__ ((aligned(16))) nqadd8[] =
-+ {
-+ -qadd, -qadd, -qadd, -qadd,
-+ -qadd, -qadd, -qadd, -qadd
-+ };
-+ register vector short blockv, qmulv, qaddv, nqaddv, temp1;
-+ register vector bool short blockv_null, blockv_neg;
-+ register short backup_0 = block[0];
-+ register int j = 0;
-+
-+ qmulv = vec_ld(0, qmul8);
-+ qaddv = vec_ld(0, qadd8);
-+ nqaddv = vec_ld(0, nqadd8);
-+
-+#if 0 // block *is* 16 bytes-aligned, it seems.
-+ // first make sure block[j] is 16 bytes-aligned
-+ for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) {
-+ level = block[j];
-+ if (level) {
-+ if (level < 0) {
-+ level = level * qmul - qadd;
-+ } else {
-+ level = level * qmul + qadd;
-+ }
-+ block[j] = level;
-+ }
-+ }
-+#endif
-+
-+ // vectorize all the 16 bytes-aligned blocks
-+ // of 8 elements
-+ for(; (j + 7) <= nCoeffs ; j+=8)
-+ {
-+ blockv = vec_ld(j << 1, block);
-+ blockv_neg = vec_cmplt(blockv, vczero);
-+ blockv_null = vec_cmpeq(blockv, vczero);
-+ // choose between +qadd or -qadd as the third operand
-+ temp1 = vec_sel(qaddv, nqaddv, blockv_neg);
-+ // multiply & add (block[i..i+7] * qmul [+-] qadd)
-+ temp1 = vec_mladd(blockv, qmulv, temp1);
-+ // put 0 where block[i..i+7] used to have 0
-+ blockv = vec_sel(temp1, blockv, blockv_null);
-+ vec_st(blockv, j << 1, block);
-+ }
-+
-+ // if nCoeffs isn't a multiple of 8, finish the job
-+ // using good old scalar units.
-+ // (we could do it using a truncated vector,
-+ // but I'm not sure it's worth the hassle)
-+ for(; j <= nCoeffs ; j++) {
-+ level = block[j];
-+ if (level) {
-+ if (level < 0) {
-+ level = level * qmul - qadd;
-+ } else {
-+ level = level * qmul + qadd;
-+ }
-+ block[j] = level;
-+ }
-+ }
-+
-+ if (i == 1)
-+ { // cheat. this avoids special-casing the first iteration
-+ block[0] = backup_0;
-+ }
-+ }
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+
-+POWERPC_PERF_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63);
-+}
-diff -Nur avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/mpegvideo_ppc.c avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/mpegvideo_ppc.c
---- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc.orig/mpegvideo_ppc.c 1970-01-01 01:00:00.000000000 +0100
-+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/mpegvideo_ppc.c 2003-09-28 17:26:40.000000000 +0200
-@@ -0,0 +1,83 @@
-+/*
-+ * Copyright (c) 2002 Dieter Shirley
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2 of the License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include "../dsputil.h"
-+#include "../mpegvideo.h"
-+#include <time.h>
-+
-+#ifdef HAVE_ALTIVEC
-+#include "dsputil_altivec.h"
-+#endif
-+
-+extern int dct_quantize_altivec(MpegEncContext *s,
-+ DCTELEM *block, int n,
-+ int qscale, int *overflow);
-+extern void dct_unquantize_h263_altivec(MpegEncContext *s,
-+ DCTELEM *block, int n, int qscale);
-+
-+extern void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block);
-+extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
-+
-+
-+void MPV_common_init_ppc(MpegEncContext *s)
-+{
-+#if HAVE_ALTIVEC
-+ if (has_altivec())
-+ {
-+ if ((s->avctx->idct_algo == FF_IDCT_AUTO) ||
-+ (s->avctx->idct_algo == FF_IDCT_ALTIVEC))
-+ {
-+ s->dsp.idct_put = idct_put_altivec;
-+ s->dsp.idct_add = idct_add_altivec;
-+#ifndef ALTIVEC_USE_REFERENCE_C_CODE
-+ s->dsp.idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
-+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ s->dsp.idct_permutation_type = FF_NO_IDCT_PERM;
-+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-+ }
-+
-+ // Test to make sure that the alignments required by the dct are met.
-+ if ((((long)(s->q_intra_matrix) & 0x0f) != 0) ||
-+ (((long)(s->q_inter_matrix) & 0x0f) != 0))
-+ {
-+ fprintf(stderr, "Internal Error: q-matrix blocks must be 16-byte aligned "
-+ "to use Altivec DCT. Reverting to non-altivec version.\n");
-+ return;
-+ }
-+
-+ if (((long)(s->intra_scantable.inverse) & 0x0f) != 0)
-+ {
-+ fprintf(stderr, "Internal Error: scan table blocks must be 16-byte aligned "
-+ "to use Altivec DCT. Reverting to non-altivec version.\n");
-+ return;
-+ }
-+
-+
-+ if ((s->avctx->dct_algo == FF_DCT_AUTO) ||
-+ (s->avctx->dct_algo == FF_DCT_ALTIVEC))
-+ {
-+ s->dct_quantize = dct_quantize_altivec;
-+ s->dct_unquantize_h263 = dct_unquantize_h263_altivec;
-+ }
-+ } else
-+#endif
-+ {
-+ /* Non-AltiVec PPC optimisations here */
-+ }
-+}
-+
--- avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/Makefile.am.orig 2003-05-25 23:11:57.000000000 +0200
+++ avifile-0.7-0.7.38/ffmpeg/libavcodec/ppc/Makefile.am 2003-11-14 01:06:03.904622008 +0100
@@ -20,6 +20,6 @@