Diffstat (limited to 'experimental/tqtinterface/qt4/src/3rdparty/libpng/pngvcrd.c')
-rw-r--r--  experimental/tqtinterface/qt4/src/3rdparty/libpng/pngvcrd.c  258
1 file changed, 129 insertions(+), 129 deletions(-)
diff --git a/experimental/tqtinterface/qt4/src/3rdparty/libpng/pngvcrd.c b/experimental/tqtinterface/qt4/src/3rdparty/libpng/pngvcrd.c
index 53ddb87f0..1221a604c 100644
--- a/experimental/tqtinterface/qt4/src/3rdparty/libpng/pngvcrd.c
+++ b/experimental/tqtinterface/qt4/src/3rdparty/libpng/pngvcrd.c
@@ -71,7 +71,7 @@ png_mmx_support(void)
_asm _emit 0x0f //CPUID instruction
_asm _emit 0xa2
- and edx, 0x00800000 //tqmask out all bits but mmx bit(24)
+ and edx, 0x00800000 //mask out all bits but mmx bit(24)
cmp edx, 0 // 0 = mmx not supported
jz NOT_SUPPORTED // non-zero = Yes, mmx IS supported
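
For reference, the same check can be written against GCC/Clang's <cpuid.h>; a minimal sketch, not libpng's code (the function name is illustrative). 0x00800000 is bit 23 of EDX, the MMX feature flag reported by CPUID leaf 1.

#include <cpuid.h>

int mmx_supported_sketch(void)
{
    unsigned int eax, ebx, ecx, edx;
    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;                          /* CPUID leaf 1 not available */
    return (edx & 0x00800000) != 0;        /* keep only the MMX bit */
}
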
@@ -94,19 +94,19 @@ NOT_SUPPORTED:
/* Combines the row recently read in with the previous row.
This routine takes care of alpha and transparency if requested.
This routine also handles the two methods of progressive display
- of interlaced images, depending on the tqmask value.
- The tqmask value describes which pixels are to be combined with
+ of interlaced images, depending on the mask value.
+ The mask value describes which pixels are to be combined with
the row. The pattern always repeats every 8 pixels, so just 8
bits are needed. A one indicates the pixel is to be combined; a
zero indicates the pixel is to be skipped. This is in addition
to any alpha or transparency value associated with the pixel. If
- you want all pixels to be combined, pass 0xff (255) in tqmask. */
+ you want all pixels to be combined, pass 0xff (255) in mask. */
/* Use this routine for x86 platform - uses faster MMX routine if machine
supports MMX */
void /* PRIVATE */
-png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
+png_combine_row(png_structp png_ptr, png_bytep row, int mask)
{
#ifdef PNG_USE_LOCAL_ARRAYS
const int png_pass_inc[7] = {8, 8, 4, 4, 2, 2, 1};
@@ -120,12 +120,12 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
png_mmx_support();
}
- if (tqmask == 0xff)
+ if (mask == 0xff)
{
png_memcpy(row, png_ptr->row_buf + 1,
(png_size_t)((png_ptr->width * png_ptr->row_info.pixel_depth + 7) >> 3));
}
- /* GRR: add "else if (tqmask == 0)" case?
+ /* GRR: add "else if (mask == 0)" case?
* or does png_combine_row() not even get called in that case? */
else
{
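
A scalar sketch of the mask semantics described above, for the simple one-byte-per-pixel case (names are illustrative, not libpng's): bit 0x80 of the mask governs the first pixel and the 8-bit pattern repeats across the row, so mask == 0xff degenerates to the straight copy taken in the fast path above.

#include <stddef.h>

static void combine_row_sketch(unsigned char *row, const unsigned char *new_row,
                               size_t width, int mask)
{
    int m = 0x80;                           /* pixel 0 is the high bit */
    for (size_t i = 0; i < width; i++)
    {
        if (m & mask)
            row[i] = new_row[i];            /* 1 bit: combine this pixel */
        m >>= 1;
        if (m == 0)
            m = 0x80;                       /* pattern repeats every 8 pixels */
    }
}
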
@@ -162,7 +162,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
for (i = 0; i < png_ptr->width; i++)
{
- if (m & tqmask)
+ if (m & mask)
{
int value;
@@ -220,7 +220,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
for (i = 0; i < png_ptr->width; i++)
{
- if (m & tqmask)
+ if (m & mask)
{
value = (*sp >> shift) & 0x3;
*dp &= (png_byte)((0x3f3f >> (6 - shift)) & 0xff);
@@ -274,7 +274,7 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
for (i = 0; i < png_ptr->width; i++)
{
- if (m & tqmask)
+ if (m & mask)
{
value = (*sp >> shift) & 0xf;
*dp &= (png_byte)((0xf0f >> (4 - shift)) & 0xff);
@@ -303,9 +303,9 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
png_bytep dstptr;
png_uint_32 len;
int m;
- int diff, untqmask;
+ int diff, unmask;
- __int64 tqmask0=0x0102040810204080;
+ __int64 mask0=0x0102040810204080;
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW)
/* && mmx_supported */ )
@@ -313,19 +313,19 @@ png_combine_row(png_structp png_ptr, png_bytep row, int tqmask)
srcptr = png_ptr->row_buf + 1;
dstptr = row;
m = 0x80;
- untqmask = ~tqmask;
+ unmask = ~mask;
len = png_ptr->width &~7; //reduce to multiple of 8
diff = png_ptr->width & 7; //amount lost
_asm
{
- movd mm7, untqmask //load bit pattern
+ movd mm7, unmask //load bit pattern
psubb mm6,mm6 //zero mm6
punpcklbw mm7,mm7
punpcklwd mm7,mm7
punpckldq mm7,mm7 //fill register with 8 masks
- movq mm0,tqmask0
+ movq mm0,mask0
pand mm0,mm7 //nonzero if keep byte
pcmpeqb mm0,mm6 //zeros->1s, v versa
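
What this prelude computes, as a scalar sketch (helper name hypothetical): the low byte of ~mask is broadcast into all eight byte lanes by the three punpck steps, mask0 supplies one distinct test bit per lane, and pcmpeqb against zero turns each lane into 0xFF (take the new row's byte) or 0x00 (keep the destination byte).

#include <stdint.h>

static uint64_t select_mask_sketch(int mask)
{
    const uint64_t mask0 = 0x0102040810204080ULL; /* one test bit per lane */
    uint64_t bcast = 0, sel = 0;
    for (int lane = 0; lane < 8; lane++)          /* punpcklbw/punpcklwd/punpckldq */
        bcast |= (uint64_t)(uint8_t)~mask << (8 * lane);
    uint64_t keep = mask0 & bcast;                /* pand: nonzero => keep dst byte */
    for (int lane = 0; lane < 8; lane++)          /* pcmpeqb vs zero: 0 -> 0xFF */
        if (((keep >> (8 * lane)) & 0xFF) == 0)
            sel |= 0xFFULL << (8 * lane);         /* 0xFF lanes take the new row */
    return sel;
}
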
@@ -355,7 +355,7 @@ mainloop8end:
cmp ecx,0
jz end8
- mov edx,tqmask
+ mov edx,mask
sal edx,24 //make low byte the high byte
secondloop8:
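
The leftover loop this sets up, as a scalar sketch (names illustrative): moving the mask byte to the top of the register lets each sal ...,1 push the next pixel's bit into the carry flag, MSB first, for the width % 8 trailing pixels.

static void combine_leftover_sketch(unsigned char *dst, const unsigned char *src,
                                    int leftover, int mask)
{
    unsigned int bits = (unsigned int)mask << 24;  /* make low byte the high byte */
    for (int i = 0; i < leftover; i++)
    {
        if (bits & 0x80000000u)                    /* the bit sal shifts into CF */
            dst[i] = src[i];
        bits <<= 1;
    }
}
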
@@ -404,9 +404,9 @@ end8:
png_bytep srcptr;
png_bytep dstptr;
png_uint_32 len;
- int untqmask, diff;
- __int64 tqmask1=0x0101020204040808,
- tqmask0=0x1010202040408080;
+ int unmask, diff;
+ __int64 mask1=0x0101020204040808,
+ mask0=0x1010202040408080;
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW)
/* && mmx_supported */ )
@@ -414,19 +414,19 @@ end8:
srcptr = png_ptr->row_buf + 1;
dstptr = row;
- untqmask = ~tqmask;
+ unmask = ~mask;
len = (png_ptr->width)&~7;
diff = (png_ptr->width)&7;
_asm
{
- movd mm7, untqmask //load bit pattern
+ movd mm7, unmask //load bit pattern
psubb mm6,mm6 //zero mm6
punpcklbw mm7,mm7
punpcklwd mm7,mm7
punpckldq mm7,mm7 //fill register with 8 masks
- movq mm0,tqmask0
- movq mm1,tqmask1
+ movq mm0,mask0
+ movq mm1,mask1
pand mm0,mm7
pand mm1,mm7
@@ -468,7 +468,7 @@ mainloop16end:
cmp ecx,0
jz end16
- mov edx,tqmask
+ mov edx,mask
sal edx,24 //make low byte the high byte
secondloop16:
sal edx,1 //move high bit to CF
@@ -516,16 +516,16 @@ end16:
png_bytep srcptr;
png_bytep dstptr;
png_uint_32 len;
- int untqmask, diff;
+ int unmask, diff;
- __int64 tqmask2=0x0101010202020404, //24bpp
- tqmask1=0x0408080810101020,
- tqmask0=0x2020404040808080;
+ __int64 mask2=0x0101010202020404, //24bpp
+ mask1=0x0408080810101020,
+ mask0=0x2020404040808080;
srcptr = png_ptr->row_buf + 1;
dstptr = row;
- untqmask = ~tqmask;
+ unmask = ~mask;
len = (png_ptr->width)&~7;
diff = (png_ptr->width)&7;
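
How the per-depth maskN constants above can be derived, as a sketch under the assumption of little-endian qword packing (the helper is hypothetical): each of the 8 interlace mask bits is replicated once per byte of its pixel, and the byte stream is packed 8 bytes at a time, with mask0 covering the first 8 bytes of the row.

#include <stdint.h>
#include <stdio.h>

static void print_masks(int bpp)                 /* 3 reproduces the 24bpp set */
{
    uint8_t bytes[64];
    for (int px = 0; px < 8; px++)               /* pixel 0 -> bit 0x80 */
        for (int b = 0; b < bpp; b++)
            bytes[px * bpp + b] = (uint8_t)(0x80 >> px);
    for (int chunk = 0; chunk < bpp; chunk++)
    {
        uint64_t m = 0;
        for (int i = 0; i < 8; i++)              /* little-endian qword packing */
            m |= (uint64_t)bytes[chunk * 8 + i] << (8 * i);
        printf("mask%d=0x%016llx\n", chunk, (unsigned long long)m);
    }
}
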
@@ -534,15 +534,15 @@ end16:
{
_asm
{
- movd mm7, untqmask //load bit pattern
+ movd mm7, unmask //load bit pattern
psubb mm6,mm6 //zero mm6
punpcklbw mm7,mm7
punpcklwd mm7,mm7
punpckldq mm7,mm7 //fill register with 8 masks
- movq mm0,tqmask0
- movq mm1,tqmask1
- movq mm2,tqmask2
+ movq mm0,mask0
+ movq mm1,mask1
+ movq mm2,mask2
pand mm0,mm7
pand mm1,mm7
@@ -595,7 +595,7 @@ mainloop24end:
cmp ecx,0
jz end24
- mov edx,tqmask
+ mov edx,mask
sal edx,24 //make low byte the high byte
secondloop24:
sal edx,1 //move high bit to CF
@@ -647,17 +647,17 @@ end24:
png_bytep srcptr;
png_bytep dstptr;
png_uint_32 len;
- int untqmask, diff;
+ int unmask, diff;
- __int64 tqmask3=0x0101010102020202, //32bpp
- tqmask2=0x0404040408080808,
- tqmask1=0x1010101020202020,
- tqmask0=0x4040404080808080;
+ __int64 mask3=0x0101010102020202, //32bpp
+ mask2=0x0404040408080808,
+ mask1=0x1010101020202020,
+ mask0=0x4040404080808080;
srcptr = png_ptr->row_buf + 1;
dstptr = row;
- untqmask = ~tqmask;
+ unmask = ~mask;
len = (png_ptr->width)&~7;
diff = (png_ptr->width)&7;
@@ -666,16 +666,16 @@ end24:
{
_asm
{
- movd mm7, untqmask //load bit pattern
+ movd mm7, unmask //load bit pattern
psubb mm6,mm6 //zero mm6
punpcklbw mm7,mm7
punpcklwd mm7,mm7
punpckldq mm7,mm7 //fill register with 8 masks
- movq mm0,tqmask0
- movq mm1,tqmask1
- movq mm2,tqmask2
- movq mm3,tqmask3
+ movq mm0,mask0
+ movq mm1,mask1
+ movq mm2,mask2
+ movq mm3,mask3
pand mm0,mm7
pand mm1,mm7
@@ -738,7 +738,7 @@ mainloop32end:
cmp ecx,0
jz end32
- mov edx,tqmask
+ mov edx,mask
sal edx,24 //make low byte the high byte
secondloop32:
sal edx,1 //move high bit to CF
@@ -787,14 +787,14 @@ end32:
png_bytep srcptr;
png_bytep dstptr;
png_uint_32 len;
- int untqmask, diff;
+ int unmask, diff;
- __int64 tqmask5=0x0101010101010202,
- tqmask4=0x0202020204040404,
- tqmask3=0x0404080808080808,
- tqmask2=0x1010101010102020,
- tqmask1=0x2020202040404040,
- tqmask0=0x4040808080808080;
+ __int64 mask5=0x0101010101010202,
+ mask4=0x0202020204040404,
+ mask3=0x0404080808080808,
+ mask2=0x1010101010102020,
+ mask1=0x2020202040404040,
+ mask0=0x4040808080808080;
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW)
/* && mmx_supported */ )
@@ -802,23 +802,23 @@ end32:
srcptr = png_ptr->row_buf + 1;
dstptr = row;
- untqmask = ~tqmask;
+ unmask = ~mask;
len = (png_ptr->width)&~7;
diff = (png_ptr->width)&7;
_asm
{
- movd mm7, untqmask //load bit pattern
+ movd mm7, unmask //load bit pattern
psubb mm6,mm6 //zero mm6
punpcklbw mm7,mm7
punpcklwd mm7,mm7
punpckldq mm7,mm7 //fill register with 8 masks
- movq mm0,tqmask0
- movq mm1,tqmask1
- movq mm2,tqmask2
- movq mm3,tqmask3
- movq mm4,tqmask4
- movq mm5,tqmask5
+ movq mm0,mask0
+ movq mm1,mask1
+ movq mm2,mask2
+ movq mm3,mask3
+ movq mm4,mask4
+ movq mm5,mask5
pand mm0,mm7
pand mm1,mm7
@@ -895,7 +895,7 @@ mainloop48end:
cmp ecx,0
jz end48
- mov edx,tqmask
+ mov edx,mask
sal edx,24 //make low byte the high byte
secondloop48:
@@ -966,7 +966,7 @@ end48:
break;
}
} /* end switch (png_ptr->row_info.pixel_depth) */
- } /* end if (non-trivial tqmask) */
+ } /* end if (non-trivial mask) */
} /* end png_combine_row() */
@@ -1931,7 +1931,7 @@ davgrlp:
mov diff, edi // take start of row
add diff, ebx // add bpp
add diff, 0xf // add 7 + 8 to incr past tqalignment boundary
- and diff, 0xfffffff8 // tqmask to tqalignment boundary
+ and diff, 0xfffffff8 // mask to tqalignment boundary
sub diff, edi // subtract from start ==> value ebx at tqalignment
jz davggo
// fix tqalignment
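
What this address arithmetic does, as a sketch (function name illustrative): round (row + bpp) up past the next 8-byte boundary and keep the result as an offset from the row start, so the aligned MMX loop can begin there.

#include <stdint.h>

static unsigned long align_diff_sketch(const unsigned char *row, int bpp)
{
    uintptr_t p = (uintptr_t)row + (uintptr_t)bpp;
    p = (p + 0xf) & ~(uintptr_t)0x7;    /* add 7 + 8, mask to 8-byte boundary */
    return (unsigned long)(p - (uintptr_t)row);
}
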
@@ -1999,7 +1999,7 @@ davg3lp:
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active
// byte
// Add 2nd active group (Raw(x-bpp)/2) to Average with LBCarry
- psllq mm6, ShiftBpp // shift the mm6 tqmask to cover bytes 3-5
+ psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 3-5
movq mm2, mm0 // mov updated Raws to mm2
psllq mm2, ShiftBpp // shift data to position correctly
movq mm1, mm3 // now use mm1 for getting LBCarrys
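
The arithmetic these LBCarry steps rely on, for reference: PNG's Average filter adds floor((Raw(x-bpp) + Prior(x)) / 2) to each byte, and since floor((a+b)/2) == a/2 + b/2 + (a & b & 1), the MMX code can halve each operand per byte lane and add the low-bit carries back without overflowing. A scalar sketch of the defilter (names illustrative):

static void defilter_avg_sketch(unsigned char *row, const unsigned char *prior,
                                unsigned long rowbytes, int bpp)
{
    for (unsigned long x = 0; x < rowbytes; x++)
    {
        unsigned int left  = (x >= (unsigned long)bpp) ? row[x - bpp] : 0;
        unsigned int above = prior[x];
        /* equals left/2 + above/2 + (left & above & 1) */
        row[x] = (unsigned char)(row[x] + ((left + above) >> 1));
    }
}
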
@@ -2013,7 +2013,7 @@ davg3lp:
// byte
// Add 3rd active group (Raw(x-bpp)/2) to Average with LBCarry
- psllq mm6, ShiftBpp // shift the mm6 tqmask to cover the last two
+ psllq mm6, ShiftBpp // shift the mm6 mask to cover the last two
// bytes
movq mm2, mm0 // mov updated Raws to mm2
psllq mm2, ShiftBpp // shift data to position correctly
@@ -2060,7 +2060,7 @@ davg3lp:
mov esi, prev_row // esi ==> Prior(x)
movq mm6, mm7
movq mm5, LBCarryMask
- psllq mm6, ShiftBpp // Create tqmask for 2nd active group
+ psllq mm6, ShiftBpp // Create mask for 2nd active group
// PRIME the pump (load the first Raw(x-bpp) data set
movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
// (we correct position in loop below)
@@ -2144,7 +2144,7 @@ davg2lp:
pand mm2, mm6 // Leave only Active Group 1 bytes to add to Avg
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
// Add 2nd active group (Raw(x-bpp)/2) to Average with LBCarry
- psllq mm6, ShiftBpp // shift the mm6 tqmask to cover bytes 2 & 3
+ psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 2 & 3
movq mm2, mm0 // mov updated Raws to mm2
psllq mm2, ShiftBpp // shift data to position correctly
movq mm1, mm3 // now use mm1 for getting LBCarrys
@@ -2157,7 +2157,7 @@ davg2lp:
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
// Add rdd active group (Raw(x-bpp)/2) to Average with LBCarry
- psllq mm6, ShiftBpp // shift the mm6 tqmask to cover bytes 4 & 5
+ psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 4 & 5
movq mm2, mm0 // mov updated Raws to mm2
psllq mm2, ShiftBpp // shift data to position correctly
// Data only needs to be shifted once here to
@@ -2172,7 +2172,7 @@ davg2lp:
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte
// Add 4th active group (Raw(x-bpp)/2) to Average with LBCarry
- psllq mm6, ShiftBpp // shift the mm6 tqmask to cover bytes 6 & 7
+ psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 6 & 7
movq mm2, mm0 // mov updated Raws to mm2
psllq mm2, ShiftBpp // shift data to position correctly
// Data only needs to be shifted once here to
@@ -2365,7 +2365,7 @@ dpthrlp:
add diff, ebx // add bpp
xor ecx, ecx
add diff, 0xf // add 7 + 8 to incr past tqalignment boundary
- and diff, 0xfffffff8 // tqmask to tqalignment boundary
+ and diff, 0xfffffff8 // mask to tqalignment boundary
sub diff, edi // subtract from start ==> value ebx at tqalignment
jz dpthgo
// fix tqalignment
@@ -2478,16 +2478,16 @@ dpth3lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -2496,9 +2496,9 @@ dpth3lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
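
The predictor these compare-and-mask sequences implement branchlessly, as a scalar sketch: the absolute values come from the pcmpgtw sign masks plus psubw, and the pa/pb/pc comparisons pick whichever of left, above, and upper-left is closest to the estimate p.

static int paeth_sketch(int a, int b, int c)    /* a=left, b=above, c=upper-left */
{
    int p  = a + b - c;                         /* initial estimate */
    int pa = p > a ? p - a : a - p;             /* abs via sign mask in the MMX code */
    int pb = p > b ? p - b : b - p;
    int pc = p > c ? p - c : c - p;
    if (pa <= pb && pa <= pc) return a;
    if (pb <= pc)             return b;
    return c;
}
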
@@ -2538,8 +2538,8 @@ dpth3lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm5 // Create tqmask pbv bytes < 0
- pcmpgtw mm7, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm5 // Create mask pbv bytes < 0
+ pcmpgtw mm7, mm4 // Create mask pav bytes < 0
pand mm0, mm5 // Only pbv bytes < 0 in mm0
pand mm7, mm4 // Only pav bytes < 0 in mm7
psubw mm5, mm0
@@ -2547,7 +2547,7 @@ dpth3lp:
psubw mm5, mm0
psubw mm4, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm6, mm0
// test pa <= pb
@@ -2555,9 +2555,9 @@ dpth3lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -2600,8 +2600,8 @@ dpth3lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
pand mm0, mm4 // Only pav bytes < 0 in mm7
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
@@ -2609,7 +2609,7 @@ dpth3lp:
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm6, mm0
// test pa <= pb
@@ -2617,9 +2617,9 @@ dpth3lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
pandn mm0, mm1
pandn mm7, mm4
@@ -2686,16 +2686,16 @@ dpth6lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -2704,9 +2704,9 @@ dpth6lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -2750,16 +2750,16 @@ dpth6lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -2768,9 +2768,9 @@ dpth6lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -2826,16 +2826,16 @@ dpth4lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -2844,9 +2844,9 @@ dpth4lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -2882,16 +2882,16 @@ dpth4lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -2900,9 +2900,9 @@ dpth4lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -2957,16 +2957,16 @@ dpth8lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -2975,9 +2975,9 @@ dpth8lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -3014,16 +3014,16 @@ dpth8lp:
// pa = abs(p-a) = abs(pav)
// pb = abs(p-b) = abs(pbv)
// pc = abs(p-c) = abs(pcv)
- pcmpgtw mm0, mm4 // Create tqmask pav bytes < 0
+ pcmpgtw mm0, mm4 // Create mask pav bytes < 0
paddw mm6, mm5
pand mm0, mm4 // Only pav bytes < 0 in mm7
- pcmpgtw mm7, mm5 // Create tqmask pbv bytes < 0
+ pcmpgtw mm7, mm5 // Create mask pbv bytes < 0
psubw mm4, mm0
pand mm7, mm5 // Only pbv bytes < 0 in mm0
psubw mm4, mm0
psubw mm5, mm7
pxor mm0, mm0
- pcmpgtw mm0, mm6 // Create tqmask pcv bytes < 0
+ pcmpgtw mm0, mm6 // Create mask pcv bytes < 0
pand mm0, mm6 // Only pav bytes < 0 in mm7
psubw mm5, mm7
psubw mm6, mm0
@@ -3032,9 +3032,9 @@ dpth8lp:
psubw mm6, mm0
pcmpgtw mm7, mm5 // pa > pb?
movq mm0, mm7
- // use mm7 tqmask to merge pa & pb
+ // use mm7 mask to merge pa & pb
pand mm5, mm7
- // use mm0 tqmask copy to merge a & b
+ // use mm0 mask copy to merge a & b
pand mm2, mm0
pandn mm7, mm4
pandn mm0, mm1
@@ -3245,7 +3245,7 @@ png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row)
add diff, 0xf // add 7 + 8 to incr past
// tqalignment boundary
xor ebx, ebx
- and diff, 0xfffffff8 // tqmask to tqalignment boundary
+ and diff, 0xfffffff8 // mask to tqalignment boundary
sub diff, edi // subtract from start ==> value
// ebx at tqalignment
jz dsubgo
@@ -3280,25 +3280,25 @@ dsubgo:
add edi, bpp // rp = row + bpp
movq mm6, mm7
mov ebx, diff
- psllq mm6, ShiftBpp // Move tqmask in mm6 to cover 3rd active
+ psllq mm6, ShiftBpp // Move mask in mm6 to cover 3rd active
// byte group
// PRIME the pump (load the first Raw(x-bpp) data set
movq mm1, [edi+ebx-8]
dsub3lp:
psrlq mm1, ShiftRem // Shift data for adding 1st bpp bytes
- // no need for tqmask; shift clears inactive bytes
+ // no need for mask; shift clears inactive bytes
// Add 1st active group
movq mm0, [edi+ebx]
paddb mm0, mm1
// Add 2nd active group
movq mm1, mm0 // mov updated Raws to mm1
psllq mm1, ShiftBpp // shift data to position correctly
- pand mm1, mm7 // tqmask to use only 2nd active group
+ pand mm1, mm7 // mask to use only 2nd active group
paddb mm0, mm1
// Add 3rd active group
movq mm1, mm0 // mov updated Raws to mm1
psllq mm1, ShiftBpp // shift data to position correctly
- pand mm1, mm6 // tqmask to use only 3rd active group
+ pand mm1, mm6 // mask to use only 3rd active group
add ebx, 8
paddb mm0, mm1
cmp ebx, MMXLength
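
What this loop vectorizes, as a scalar sketch: the Sub defilter Raw(x) = Sub(x) + Raw(x-bpp) carries a serial dependence, which the MMX version resolves inside each 8-byte block by repeatedly shifting the partial sums left by bpp bytes and masking in one "active group" of lanes at a time.

static void defilter_sub_sketch(unsigned char *row, unsigned long rowbytes, int bpp)
{
    for (unsigned long x = (unsigned long)bpp; x < rowbytes; x++)
        row[x] = (unsigned char)(row[x] + row[x - bpp]);
}
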
@@ -3359,13 +3359,13 @@ dsub1end:
movq mm1, [edi+ebx-8]
dsub4lp:
psrlq mm1, ShiftRem // Shift data for adding 1st bpp bytes
- // no need for tqmask; shift clears inactive bytes
+ // no need for mask; shift clears inactive bytes
movq mm0, [edi+ebx]
paddb mm0, mm1
// Add 2nd active group
movq mm1, mm0 // mov updated Raws to mm1
psllq mm1, ShiftBpp // shift data to position correctly
- // there is no need for any tqmask
+ // there is no need for any mask
// since shift clears inactive bits/bytes
add ebx, 8
paddb mm0, mm1
@@ -3387,36 +3387,36 @@ dsub4lp:
mov ebx, diff
movq mm6, mm7
mov edi, row
- psllq mm6, ShiftBpp // Move tqmask in mm6 to cover 3rd active
+ psllq mm6, ShiftBpp // Move mask in mm6 to cover 3rd active
// byte group
mov esi, edi // lp = row
movq mm5, mm6
add edi, bpp // rp = row + bpp
- psllq mm5, ShiftBpp // Move tqmask in mm5 to cover 4th active
+ psllq mm5, ShiftBpp // Move mask in mm5 to cover 4th active
// byte group
// PRIME the pump (load the first Raw(x-bpp) data set
movq mm1, [edi+ebx-8]
dsub2lp:
// Add 1st active group
psrlq mm1, ShiftRem // Shift data for adding 1st bpp bytes
- // no need for tqmask; shift clears inactive
+ // no need for mask; shift clears inactive
// bytes
movq mm0, [edi+ebx]
paddb mm0, mm1
// Add 2nd active group
movq mm1, mm0 // mov updated Raws to mm1
psllq mm1, ShiftBpp // shift data to position correctly
- pand mm1, mm7 // tqmask to use only 2nd active group
+ pand mm1, mm7 // mask to use only 2nd active group
paddb mm0, mm1
// Add 3rd active group
movq mm1, mm0 // mov updated Raws to mm1
psllq mm1, ShiftBpp // shift data to position correctly
- pand mm1, mm6 // tqmask to use only 3rd active group
+ pand mm1, mm6 // mask to use only 3rd active group
paddb mm0, mm1
// Add 4th active group
movq mm1, mm0 // mov updated Raws to mm1
psllq mm1, ShiftBpp // shift data to position correctly
- pand mm1, mm5 // tqmask to use only 4th active group
+ pand mm1, mm5 // mask to use only 4th active group
add ebx, 8
paddb mm0, mm1
cmp ebx, MMXLength