
swscale_altivec_template.c

/*
  AltiVec-enhanced yuv2yuvX

    Copyright (C) 2004 Romain Dolbeau <[email protected]>
    based on the equivalent C code in "postproc/swscale.c"


    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#ifdef CONFIG_DARWIN
#define AVV(x...) (x)
#else
#define AVV(x...) {x}
#endif
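/* Note (added for clarity): the AVV macro papers over the two AltiVec vector
 * literal syntaxes so the same constants build everywhere. Apple's compilers
 * (CONFIG_DARWIN) expect parenthesized literals such as
 * (vector signed int)(0, 0, 0, 0), while FSF GCC expects braced initializers
 * such as {0, 0, 0, 0}. The constants below use it. */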

static const vector signed int vzero =
  (const vector signed int)AVV(0, 0, 0, 0);
static const vector unsigned int altivec_vectorShiftInt19 =
  (const vector unsigned int)AVV(19, 19, 19, 19);

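/* altivec_packIntArrayToCharArray converts a buffer of 32-bit accumulators
 * into 8-bit pixels: each value is shifted right by 19 (dropping the
 * fixed-point fraction plus the 1<<18 rounding term added by the callers
 * below) and clamped to [0,255]. The vector paths handle 16 pixels per
 * iteration; scalar loops cover the misaligned head and the tail. The scalar
 * equivalent, as used for those leftover pixels, is simply:
 *
 *   int t = val[i] >> 19;
 *   dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
 */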
static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
  register int i;
  if ((unsigned long)dest % 16) {
    /* dest is misaligned: copy scalars until the store address is 16-byte aligned, */
    /* then handle the load misalignment on val with vec_perm */
    for (i = 0 ; (i < dstW) &&
           (((unsigned long)dest + i) % 16) ; i++) {
      int t = val[i] >> 19;
      dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
    }
    vector unsigned char perm1 = vec_lvsl(i << 2, val);
    vector signed int v1 = vec_ld(i << 2, val);
    for ( ; i < (dstW - 15); i+=16) {
      int offset = i << 2;
      vector signed int v2 = vec_ld(offset + 16, val);
      vector signed int v3 = vec_ld(offset + 32, val);
      vector signed int v4 = vec_ld(offset + 48, val);
      vector signed int v5 = vec_ld(offset + 64, val);
      vector signed int v12 = vec_perm(v1,v2,perm1);
      vector signed int v23 = vec_perm(v2,v3,perm1);
      vector signed int v34 = vec_perm(v3,v4,perm1);
      vector signed int v45 = vec_perm(v4,v5,perm1);

      vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
      vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
      vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
      vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
      vector unsigned short vs1 = vec_packsu(vA, vB);
      vector unsigned short vs2 = vec_packsu(vC, vD);
      vector unsigned char vf = vec_packsu(vs1, vs2);
      vec_st(vf, i, dest);
      v1 = v5;
    }
  } else { // dest is properly aligned, great
    for (i = 0; i < (dstW - 15); i+=16) {
      int offset = i << 2;
      vector signed int v1 = vec_ld(offset, val);
      vector signed int v2 = vec_ld(offset + 16, val);
      vector signed int v3 = vec_ld(offset + 32, val);
      vector signed int v4 = vec_ld(offset + 48, val);
      vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
      vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
      vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
      vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
      vector unsigned short vs1 = vec_packsu(v5, v6);
      vector unsigned short vs2 = vec_packsu(v7, v8);
      vector unsigned char vf = vec_packsu(vs1, vs2);
      vec_st(vf, i, dest);
    }
  }
  for ( ; i < dstW ; i++) {
    int t = val[i] >> 19;
    dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
  }
}

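/* Vertical scaler: for each output pixel this computes
 *
 *   dest[i] = clip8(((1 << 18) + sum_j lumSrc[j][i] * lumFilter[j]) >> 19)
 *
 * and the same for the two chroma planes with chrSrc/chrFilter, then packs
 * the 32-bit accumulators with altivec_packIntArrayToCharArray(). In the
 * vector loops, vec_mule/vec_mulo give the 32-bit products of the even/odd
 * 16-bit lanes, and vec_mergeh/vec_mergel reinterleave them back into pixel
 * order before accumulation. A rough scalar sketch of the luma part
 * (illustrative only, mirroring the scalar tail loops below):
 *
 *   for (i = 0; i < dstW; i++) {
 *     int acc = 1 << 18;
 *     for (j = 0; j < lumFilterSize; j++)
 *       acc += lumSrc[j][i] * lumFilter[j];
 *     int t = acc >> 19;
 *     dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
 *   }
 */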
static inline void
yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                      int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                      uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
  const vector signed int vini = (const vector signed int)AVV(1 << 18, 1 << 18, 1 << 18, 1 << 18); // 1<<18 rounding offset; AVV keeps the literal portable (see macro above)
  register int i, j;
  {
    int __attribute__ ((aligned (16))) val[dstW];

    for (i = 0; i < (dstW -7); i+=4) {
      vec_st(vini, i << 2, val);
    }
    for (; i < dstW; i++) {
      val[i] = (1 << 18);
    }

    for (j = 0; j < lumFilterSize; j++) {
      vector signed short vLumFilter = vec_ld(j << 1, lumFilter);
      vector unsigned char perm0 = vec_lvsl(j << 1, lumFilter);
      vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
      vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
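      /* (added note) vec_ld fetched the aligned 16-byte block that contains
       * lumFilter[j]; the vec_lvsl/vec_perm pair rotates that element into
       * lane 0 regardless of the alignment of lumFilter + j, and vec_splat
       * then broadcasts it to all eight lanes. */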

      vector unsigned char perm = vec_lvsl(0, lumSrc[j]);
      vector signed short l1 = vec_ld(0, lumSrc[j]);

      for (i = 0; i < (dstW - 7); i+=8) {
        int offset = i << 2;
        vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);

        vector signed int v1 = vec_ld(offset, val);
        vector signed int v2 = vec_ld(offset + 16, val);

        vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]

        vector signed int i1 = vec_mule(vLumFilter, ls);
        vector signed int i2 = vec_mulo(vLumFilter, ls);

        vector signed int vf1 = vec_mergeh(i1, i2);
        vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]

        vector signed int vo1 = vec_add(v1, vf1);
        vector signed int vo2 = vec_add(v2, vf2);

        vec_st(vo1, offset, val);
        vec_st(vo2, offset + 16, val);

        l1 = l2;
      }
      for ( ; i < dstW; i++) {
        val[i] += lumSrc[j][i] * lumFilter[j];
      }
    }
    altivec_packIntArrayToCharArray(val,dest,dstW);
  }
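  /* (added note) The chroma pass below mirrors the luma pass. In this buffer
   * layout each chrSrc[j] line carries the U samples first and the matching V
   * samples 2048 int16_t entries further on, hence the i + 2048 indexing and
   * the separate u[]/v[] accumulators packed into uDest and vDest. */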
  if (uDest != 0) {
    int  __attribute__ ((aligned (16))) u[chrDstW];
    int  __attribute__ ((aligned (16))) v[chrDstW];

    for (i = 0; i < (chrDstW -7); i+=4) {
      vec_st(vini, i << 2, u);
      vec_st(vini, i << 2, v);
    }
    for (; i < chrDstW; i++) {
      u[i] = (1 << 18);
      v[i] = (1 << 18);
    }

    for (j = 0; j < chrFilterSize; j++) {
      vector signed short vChrFilter = vec_ld(j << 1, chrFilter);
      vector unsigned char perm0 = vec_lvsl(j << 1, chrFilter);
      vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
      vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter

      vector unsigned char perm = vec_lvsl(0, chrSrc[j]);
      vector signed short l1 = vec_ld(0, chrSrc[j]);
      vector signed short l1_V = vec_ld(2048 << 1, chrSrc[j]);

      for (i = 0; i < (chrDstW - 7); i+=8) {
        int offset = i << 2;
        vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
        vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);

        vector signed int v1 = vec_ld(offset, u);
        vector signed int v2 = vec_ld(offset + 16, u);
        vector signed int v1_V = vec_ld(offset, v);
        vector signed int v2_V = vec_ld(offset + 16, v);

        vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
        vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]

        vector signed int i1 = vec_mule(vChrFilter, ls);
        vector signed int i2 = vec_mulo(vChrFilter, ls);
        vector signed int i1_V = vec_mule(vChrFilter, ls_V);
        vector signed int i2_V = vec_mulo(vChrFilter, ls_V);

        vector signed int vf1 = vec_mergeh(i1, i2);
        vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
        vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
        vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i+2048] * chrFilter[j] ... chrSrc[j][i+2055] * chrFilter[j]

        vector signed int vo1 = vec_add(v1, vf1);
        vector signed int vo2 = vec_add(v2, vf2);
        vector signed int vo1_V = vec_add(v1_V, vf1_V);
        vector signed int vo2_V = vec_add(v2_V, vf2_V);

        vec_st(vo1, offset, u);
        vec_st(vo2, offset + 16, u);
        vec_st(vo1_V, offset, v);
        vec_st(vo2_V, offset + 16, v);

        l1 = l2;
        l1_V = l2_V;
      }
      for ( ; i < chrDstW; i++) {
        u[i] += chrSrc[j][i] * chrFilter[j];
        v[i] += chrSrc[j][i + 2048] * chrFilter[j];
      }
    }
    altivec_packIntArrayToCharArray(u,uDest,chrDstW);
    altivec_packIntArrayToCharArray(v,vDest,chrDstW);
  }
}

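/* Horizontal scaler: for each output sample it evaluates
 *
 *   dst[i] = clip(0, (sum_j src[filterPos[i] + j] * filter[filterSize*i + j]) >> 7, (1<<15)-1)
 *
 * Filter sizes of 4, 8 and 16 taps get dedicated vector paths (the 4-tap
 * path uses vec_mule, the wider ones vec_msums); any other size that is a
 * multiple of 4 goes through the generic default case, and sizes that are
 * not a multiple of 4 fall back to the plain C loop at the top, whose body
 * is simply:
 *
 *   for (j = 0; j < filterSize; j++)
 *     val += ((int)src[srcPos + j]) * filter[filterSize*i + j];
 *   dst[i] = MIN(MAX(0, val>>7), (1<<15)-1);
 */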
static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW,
                                       int xInc, int16_t *filter, int16_t *filterPos,
                                       int filterSize) {
  register int i;
  int __attribute__ ((aligned (16))) tempo[4];

  if (filterSize % 4) {
    for(i=0; i<dstW; i++) {
      register int j;
      register int srcPos = filterPos[i];
      register int val = 0;
      for(j=0; j<filterSize; j++) {
        val += ((int)src[srcPos + j])*filter[filterSize*i + j];
      }
      dst[i] = MIN(MAX(0, val>>7), (1<<15)-1);
    }
  }
  else
  switch (filterSize) {
  case 4:
    {
      for(i=0; i<dstW; i++) {
        register int j;
        register int srcPos = filterPos[i];

        vector unsigned char src_v0 = vec_ld(srcPos, src);
        vector unsigned char src_v1;
        if ((((int)src + srcPos)% 16) > 12) {
          src_v1 = vec_ld(srcPos + 16, src);
        }
        vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

        vector signed short src_v = // vec_unpackh sign-extends...
          (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
        // now put our elements in the even slots
        src_v = vec_mergeh(src_v, (vector signed short)vzero);

        vector signed short filter_v = vec_ld(i << 3, filter);
        // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)

        // the neat trick: we only care about half the elements,
        // high or low depending on (i << 3) % 16 (it's 0 or 8 here),
        // and since we're going to use vec_mule, we choose
        // carefully how to "unpack" the elements into the even slots
        if ((i << 3) % 16)
          filter_v = vec_mergel(filter_v,(vector signed short)vzero);
        else
          filter_v = vec_mergeh(filter_v,(vector signed short)vzero);

        vector signed int val_vEven = vec_mule(src_v, filter_v);
        vector signed int val_s = vec_sums(val_vEven, vzero);
        vec_st(val_s, 0, tempo);
        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
      }
    }
    break;

  case 8:
    {
      for(i=0; i<dstW; i++) {
        register int srcPos = filterPos[i];

        vector unsigned char src_v0 = vec_ld(srcPos, src);
        vector unsigned char src_v1;
        if ((((int)src + srcPos)% 16) > 8) {
          src_v1 = vec_ld(srcPos + 16, src);
        }
        vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

        vector signed short src_v = // vec_unpackh sign-extends...
          (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
        vector signed short filter_v = vec_ld(i << 4, filter);
        // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)

        vector signed int val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
        vector signed int val_s = vec_sums(val_v, vzero);
        vec_st(val_s, 0, tempo);
        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
      }
    }
    break;

  case 16:
    {
      for(i=0; i<dstW; i++) {
        register int srcPos = filterPos[i];

        vector unsigned char src_v0 = vec_ld(srcPos, src);
        vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
        vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

        vector signed short src_vA = // vec_unpackh sign-extends...
          (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
        vector signed short src_vB = // vec_unpackh sign-extends...
          (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

        vector signed short filter_v0 = vec_ld(i << 5, filter);
        vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
        // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)

        vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
        vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);

        vector signed int val_s = vec_sums(val_v, vzero);

        vec_st(val_s, 0, tempo);
        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
      }
    }
    break;

  default:
    {
      for(i=0; i<dstW; i++) {
        register int j;
        register int srcPos = filterPos[i];

        vector signed int val_v = (vector signed int)vzero;
        vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
        vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);

        vector unsigned char src_v0 = vec_ld(srcPos, src);
        vector unsigned char permS = vec_lvsl(srcPos, src);

        for (j = 0 ; j < filterSize - 15; j += 16) {
          vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
          vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);

          vector signed short src_vA = // vec_unpackh sign-extends...
            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
          vector signed short src_vB = // vec_unpackh sign-extends...
            (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

          vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
          vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
          vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
          vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);

          vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
          val_v = vec_msums(src_vB, filter_v1, val_acc);

          filter_v0R = filter_v2R;
          src_v0 = src_v1;
        }

        if (j < (filterSize-7)) {
          // loading src_v0 is useless, it's already done above
          //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
          vector unsigned char src_v1;
          if ((((int)src + srcPos)% 16) > 8) {
            src_v1 = vec_ld(srcPos + j + 16, src);
          }
          vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);

          vector signed short src_v = // vec_unpackh sign-extends...
            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
          // loading filter_v0R is useless, it's already done above
          //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
          vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
          vector signed short filter_v = vec_perm(filter_v0R, filter_v1R, permF);

          val_v = vec_msums(src_v, filter_v, val_v);
        }

        vector signed int val_s = vec_sums(val_v, vzero);

        vec_st(val_s, 0, tempo);
        dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
      }

    }
  }
}

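/* Unscaled planar YV12 -> packed YUY2 conversion. vec_mergeh/vec_mergel of
 * the U and V vectors produce the interleaved chroma stream U0 V0 U1 V1 ...,
 * and merging that with the luma vectors yields the YUY2 byte order
 * Y0 U0 Y1 V0 ... Chroma rows only advance every other output row
 * (vertLumPerChroma == 2), matching 4:2:0 input. */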
static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
     int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
  uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
  // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
  uint8_t *ysrc = src[0];
  uint8_t *usrc = src[1];
  uint8_t *vsrc = src[2];
  const int width = c->srcW;
  const int height = srcSliceH;
  const int lumStride = srcStride[0];
  const int chromStride = srcStride[1];
  const int dstStride = dstStride_a[0];
  const vector unsigned char yperm = vec_lvsl(0, ysrc);
  const int vertLumPerChroma = 2;
  register unsigned int y;

  /* This code assumes:

  1) dst is 16-byte aligned
  2) dstStride is a multiple of 16
  3) width is a multiple of 16
  4) lum & chrom strides are multiples of 8
  */

  for(y=0; y<height; y++)
    {
      int i;
      for (i = 0; i < width - 31; i+= 32) {
        const unsigned int j = i >> 1;
        vector unsigned char v_yA = vec_ld(i, ysrc);
        vector unsigned char v_yB = vec_ld(i + 16, ysrc);
        vector unsigned char v_yC = vec_ld(i + 32, ysrc);
        vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
        vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
        vector unsigned char v_uA = vec_ld(j, usrc);
        vector unsigned char v_uB = vec_ld(j + 16, usrc);
        vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
        vector unsigned char v_vA = vec_ld(j, vsrc);
        vector unsigned char v_vB = vec_ld(j + 16, vsrc);
        vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
        vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
        vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
        vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
        vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
        vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
        vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
        vec_st(v_yuy2_0, (i << 1), dst);
        vec_st(v_yuy2_1, (i << 1) + 16, dst);
        vec_st(v_yuy2_2, (i << 1) + 32, dst);
        vec_st(v_yuy2_3, (i << 1) + 48, dst);
      }
      if (i < width) {
        const unsigned int j = i >> 1;
        vector unsigned char v_y1 = vec_ld(i, ysrc);
        vector unsigned char v_u = vec_ld(j, usrc);
        vector unsigned char v_v = vec_ld(j, vsrc);
        vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
        vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
        vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
        vec_st(v_yuy2_0, (i << 1), dst);
        vec_st(v_yuy2_1, (i << 1) + 16, dst);
      }
      if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
        {
          usrc += chromStride;
          vsrc += chromStride;
        }
      ysrc += lumStride;
      dst += dstStride;
    }

  return srcSliceH;
}

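/* Same conversion as above, but to packed UYVY: the merge order is swapped so
 * that the chroma byte comes first in each pair, giving the byte order
 * U0 Y0 V0 Y1 ... */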
static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
     int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
  uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
  // yv12touyvy( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
  uint8_t *ysrc = src[0];
  uint8_t *usrc = src[1];
  uint8_t *vsrc = src[2];
  const int width = c->srcW;
  const int height = srcSliceH;
  const int lumStride = srcStride[0];
  const int chromStride = srcStride[1];
  const int dstStride = dstStride_a[0];
  const int vertLumPerChroma = 2;
  const vector unsigned char yperm = vec_lvsl(0, ysrc);
  register unsigned int y;

  /* This code assumes:

  1) dst is 16-byte aligned
  2) dstStride is a multiple of 16
  3) width is a multiple of 16
  4) lum & chrom strides are multiples of 8
  */

  for(y=0; y<height; y++)
    {
      int i;
      for (i = 0; i < width - 31; i+= 32) {
        const unsigned int j = i >> 1;
        vector unsigned char v_yA = vec_ld(i, ysrc);
        vector unsigned char v_yB = vec_ld(i + 16, ysrc);
        vector unsigned char v_yC = vec_ld(i + 32, ysrc);
        vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
        vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
        vector unsigned char v_uA = vec_ld(j, usrc);
        vector unsigned char v_uB = vec_ld(j + 16, usrc);
        vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
        vector unsigned char v_vA = vec_ld(j, vsrc);
        vector unsigned char v_vB = vec_ld(j + 16, vsrc);
        vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
        vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
        vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
        vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
        vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
        vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
        vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
        vec_st(v_uyvy_0, (i << 1), dst);
        vec_st(v_uyvy_1, (i << 1) + 16, dst);
        vec_st(v_uyvy_2, (i << 1) + 32, dst);
        vec_st(v_uyvy_3, (i << 1) + 48, dst);
      }
      if (i < width) {
        const unsigned int j = i >> 1;
        vector unsigned char v_y1 = vec_ld(i, ysrc);
        vector unsigned char v_u = vec_ld(j, usrc);
        vector unsigned char v_v = vec_ld(j, vsrc);
        vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
        vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
        vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
        vec_st(v_uyvy_0, (i << 1), dst);
        vec_st(v_uyvy_1, (i << 1) + 16, dst);
      }
      if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
        {
          usrc += chromStride;
          vsrc += chromStride;
        }
      ysrc += lumStride;
      dst += dstStride;
    }
  return srcSliceH;
}
