author | marha <marha@users.sourceforge.net> | 2011-02-16 17:16:41 +0000 |
---|---|---|
committer | marha <marha@users.sourceforge.net> | 2011-02-16 17:16:41 +0000 |
commit | 41723858b0effb450ce946297e5a606bc7348be2 (patch) | |
tree | 8831ace435ad11e06f196e62f0fb4d63fa740deb | |
parent | 92fef6a3a6851ee123dd793788aac50c0831a964 (diff) | |
parent | 48d0dcbd5b7f80810ce259bc9ed6f57f99e27ca9 (diff) | |
download | vcxsrv-41723858b0effb450ce946297e5a606bc7348be2.tar.gz vcxsrv-41723858b0effb450ce946297e5a606bc7348be2.tar.bz2 vcxsrv-41723858b0effb450ce946297e5a606bc7348be2.zip |
svn merge ^/branches/released .
41 files changed, 11937 insertions, 11483 deletions
diff --git a/mesalib/src/glsl/ir_constant_expression.cpp b/mesalib/src/glsl/ir_constant_expression.cpp
index 2841fb350..1d39e31fa 100644
--- a/mesalib/src/glsl/ir_constant_expression.cpp
+++ b/mesalib/src/glsl/ir_constant_expression.cpp
@@ -1,1376 +1,1372 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_constant_expression.cpp
+ * Evaluate and process constant valued expressions
+ *
+ * In GLSL, constant valued expressions are used in several places. These
+ * must be processed and evaluated very early in the compilation process.
+ *
+ * * Sizes of arrays
+ * * Initializers for uniforms
+ * * Initializers for \c const variables
+ */
+
+#include <math.h>
+#include "main/core.h" /* for MAX2, MIN2, CLAMP */
+#include "ir.h"
+#include "ir_visitor.h"
+#include "glsl_types.h"
+
+static float
+dot(ir_constant *op0, ir_constant *op1)
+{
+ assert(op0->type->is_float() && op1->type->is_float());
+
+ float result = 0;
+ for (unsigned c = 0; c < op0->type->components(); c++)
+ result += op0->value.f[c] * op1->value.f[c];
+
+ return result;
+}
+
+ir_constant *
+ir_expression::constant_expression_value()
+{
+ if (this->type->is_error())
+ return NULL;
+
+ ir_constant *op[Elements(this->operands)] = { NULL, };
+ ir_constant_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ for (unsigned operand = 0; operand < this->get_num_operands(); operand++) {
+ op[operand] = this->operands[operand]->constant_expression_value();
+ if (!op[operand])
+ return NULL;
+ }
+
+ if (op[1] != NULL)
+ assert(op[0]->type->base_type == op[1]->type->base_type);
+
+ bool op0_scalar = op[0]->type->is_scalar();
+ bool op1_scalar = op[1] != NULL && op[1]->type->is_scalar();
+
+ /* When iterating over a vector or matrix's components, we want to increase
+ * the loop counter. However, for scalars, we want to stay at 0.
+ */
+ unsigned c0_inc = op0_scalar ? 0 : 1;
+ unsigned c1_inc = op1_scalar ? 0 : 1;
+ unsigned components;
+ if (op1_scalar || !op[1]) {
+ components = op[0]->type->components();
+ } else {
+ components = op[1]->type->components();
+ }
+
+ void *ctx = ralloc_parent(this);
+
+ /* Handle array operations here, rather than below. */
+ if (op[0]->type->is_array()) {
+ assert(op[1] != NULL && op[1]->type->is_array());
+ switch (this->operation) {
+ case ir_binop_all_equal:
+ return new(ctx) ir_constant(op[0]->has_value(op[1]));
+ case ir_binop_any_nequal:
+ return new(ctx) ir_constant(!op[0]->has_value(op[1]));
+ default:
+ break;
+ }
+ return NULL;
+ }
+
+ switch (this->operation) {
+ case ir_unop_bit_not:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ for (unsigned c = 0; c < components; c++)
+ data.i[c] = ~ op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_UINT:
+ for (unsigned c = 0; c < components; c++)
+ data.u[c] = ~ op[0]->value.u[c];
+ break;
+ default:
+ assert(0);
+ }
+ break;
+
+ case ir_unop_logic_not:
+ assert(op[0]->type->base_type == GLSL_TYPE_BOOL);
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.b[c] = !op[0]->value.b[c];
+ break;
+
+ case ir_unop_f2i:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.i[c] = (int) op[0]->value.f[c];
+ }
+ break;
+ case ir_unop_i2f:
+ assert(op[0]->type->base_type == GLSL_TYPE_INT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = (float) op[0]->value.i[c];
+ }
+ break;
+ case ir_unop_u2f:
+ assert(op[0]->type->base_type == GLSL_TYPE_UINT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = (float) op[0]->value.u[c];
+ }
+ break;
+ case ir_unop_b2f:
+ assert(op[0]->type->base_type == GLSL_TYPE_BOOL);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = op[0]->value.b[c] ? 1.0F : 0.0F;
+ }
+ break;
+ case ir_unop_f2b:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.b[c] = op[0]->value.f[c] != 0.0F ? true : false;
+ }
+ break;
+ case ir_unop_b2i:
+ assert(op[0]->type->base_type == GLSL_TYPE_BOOL);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.u[c] = op[0]->value.b[c] ? 1 : 0;
+ }
+ break;
+ case ir_unop_i2b:
+ assert(op[0]->type->is_integer());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.b[c] = op[0]->value.u[c] ? true : false;
+ }
+ break;
+
+ case ir_unop_any:
+ assert(op[0]->type->is_boolean());
+ data.b[0] = false;
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ if (op[0]->value.b[c])
+ data.b[0] = true;
+ }
+ break;
+
+ case ir_unop_trunc:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = truncf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_ceil:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = ceilf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_floor:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = floorf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_fract:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = 0;
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = 0;
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c] - floor(op[0]->value.f[c]);
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_unop_sin:
+ case ir_unop_sin_reduced:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = sinf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_cos:
+ case ir_unop_cos_reduced:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = cosf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_neg:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = -((int) op[0]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = -op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = -op[0]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_unop_abs:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c];
+ if (data.i[c] < 0)
+ data.i[c] = -data.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = fabs(op[0]->value.f[c]);
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_unop_sign:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.i[c] > 0;
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = (op[0]->value.i[c] > 0) - (op[0]->value.i[c] < 0);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = float((op[0]->value.f[c] > 0)-(op[0]->value.f[c] < 0));
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_unop_rcp:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if (op[0]->value.u[c] != 0.0)
+ data.u[c] = 1 / op[0]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ if (op[0]->value.i[c] != 0.0)
+ data.i[c] = 1 / op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ if (op[0]->value.f[c] != 0.0)
+ data.f[c] = 1.0F / op[0]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_unop_rsq:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = 1.0F / sqrtf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_sqrt:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = sqrtf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_exp:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = expf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_exp2:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = exp2f(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_log:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = logf(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_log2:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = log2f(op[0]->value.f[c]);
+ }
+ break;
+
+ case ir_unop_dFdx:
+ case ir_unop_dFdy:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = 0.0;
+ }
+ break;
+
+ case ir_binop_pow:
+ assert(op[0]->type->base_type == GLSL_TYPE_FLOAT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.f[c] = powf(op[0]->value.f[c], op[1]->value.f[c]);
+ }
+ break;
+
+ case ir_binop_dot:
+ data.f[0] = dot(op[0], op[1]);
+ break;
+
+ case ir_binop_min:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = MIN2(op[0]->value.u[c0], op[1]->value.u[c1]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = MIN2(op[0]->value.i[c0], op[1]->value.i[c1]);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = MIN2(op[0]->value.f[c0], op[1]->value.f[c1]);
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ break;
+ case ir_binop_max:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = MAX2(op[0]->value.u[c0], op[1]->value.u[c1]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = MAX2(op[0]->value.i[c0], op[1]->value.i[c1]);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = MAX2(op[0]->value.f[c0], op[1]->value.f[c1]);
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_binop_add:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] + op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] + op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] + op[1]->value.f[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ break;
+ case ir_binop_sub:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] - op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] - op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] - op[1]->value.f[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ break;
+ case ir_binop_mul:
+ /* Check for equal types, or unequal types involving scalars */
+ if ((op[0]->type == op[1]->type && !op[0]->type->is_matrix())
+ || op0_scalar || op1_scalar) {
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] * op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] * op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] * op[1]->value.f[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ } else {
+ assert(op[0]->type->is_matrix() || op[1]->type->is_matrix());
+
+ /* Multiply an N-by-M matrix with an M-by-P matrix. Since either
+ * matrix can be a GLSL vector, either N or P can be 1.
+ *
+ * For vec*mat, the vector is treated as a row vector. This
+ * means the vector is a 1-row x M-column matrix.
+ *
+ * For mat*vec, the vector is treated as a column vector. Since
+ * matrix_columns is 1 for vectors, this just works.
+ */
+ const unsigned n = op[0]->type->is_vector()
+ ? 1 : op[0]->type->vector_elements;
+ const unsigned m = op[1]->type->vector_elements;
+ const unsigned p = op[1]->type->matrix_columns;
+ for (unsigned j = 0; j < p; j++) {
+ for (unsigned i = 0; i < n; i++) {
+ for (unsigned k = 0; k < m; k++) {
+ data.f[i+n*j] += op[0]->value.f[i+n*k]*op[1]->value.f[k+m*j];
+ }
+ }
+ }
+ }
+
+ break;
+ case ir_binop_div:
+ /* FINISHME: Emit warning when division-by-zero is detected. */
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if (op[1]->value.u[c1] == 0) {
+ data.u[c] = 0;
+ } else {
+ data.u[c] = op[0]->value.u[c0] / op[1]->value.u[c1];
+ }
+ break;
+ case GLSL_TYPE_INT:
+ if (op[1]->value.i[c1] == 0) {
+ data.i[c] = 0;
+ } else {
+ data.i[c] = op[0]->value.i[c0] / op[1]->value.i[c1];
+ }
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] / op[1]->value.f[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ break;
+ case ir_binop_mod:
+ /* FINISHME: Emit warning when division-by-zero is detected. */
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if (op[1]->value.u[c1] == 0) {
+ data.u[c] = 0;
+ } else {
+ data.u[c] = op[0]->value.u[c0] % op[1]->value.u[c1];
+ }
+ break;
+ case GLSL_TYPE_INT:
+ if (op[1]->value.i[c1] == 0) {
+ data.i[c] = 0;
+ } else {
+ data.i[c] = op[0]->value.i[c0] % op[1]->value.i[c1];
+ }
+ break;
+ case GLSL_TYPE_FLOAT:
+ /* We don't use fmod because it rounds toward zero; GLSL specifies
+ * the use of floor.
+ */
+ data.f[c] = op[0]->value.f[c0] - op[1]->value.f[c1]
+ * floorf(op[0]->value.f[c0] / op[1]->value.f[c1]);
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ break;
+
+ case ir_binop_logic_and:
+ assert(op[0]->type->base_type == GLSL_TYPE_BOOL);
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.b[c] = op[0]->value.b[c] && op[1]->value.b[c];
+ break;
+ case ir_binop_logic_xor:
+ assert(op[0]->type->base_type == GLSL_TYPE_BOOL);
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.b[c] = op[0]->value.b[c] ^ op[1]->value.b[c];
+ break;
+ case ir_binop_logic_or:
+ assert(op[0]->type->base_type == GLSL_TYPE_BOOL);
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.b[c] = op[0]->value.b[c] || op[1]->value.b[c];
+ break;
+
+ case ir_binop_less:
+ assert(op[0]->type == op[1]->type);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] < op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] < op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] < op[1]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+ case ir_binop_greater:
+ assert(op[0]->type == op[1]->type);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] > op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] > op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] > op[1]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+ case ir_binop_lequal:
+ assert(op[0]->type == op[1]->type);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] <= op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] <= op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] <= op[1]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+ case ir_binop_gequal:
+ assert(op[0]->type == op[1]->type);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] >= op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] >= op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] >= op[1]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+ case ir_binop_equal:
+ assert(op[0]->type == op[1]->type);
+ for (unsigned c = 0; c < components; c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] == op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] == op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] == op[1]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+ case ir_binop_nequal:
+ assert(op[0]->type == op[1]->type);
+ for (unsigned c = 0; c < components; c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] != op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] != op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] != op[1]->value.f[c];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+ case ir_binop_all_equal:
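+ /* Aggregate comparison: has_value() checks every component of the two
+ * constants and reduces the result to a single bool.
+ */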
+ data.b[0] = op[0]->has_value(op[1]);
+ break;
+ case ir_binop_any_nequal:
+ data.b[0] = !op[0]->has_value(op[1]);
+ break;
+
+ case ir_binop_lshift:
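+ /* GLSL lets the shifted value and the shift count differ in signedness,
+ * so all four int/uint combinations are handled explicitly.
+ */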
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ if (op[0]->type->base_type == GLSL_TYPE_INT &&
+ op[1]->type->base_type == GLSL_TYPE_INT) {
+ data.i[c] = op[0]->value.i[c0] << op[1]->value.i[c1];
+
+ } else if (op[0]->type->base_type == GLSL_TYPE_INT &&
+ op[1]->type->base_type == GLSL_TYPE_UINT) {
+ data.i[c] = op[0]->value.i[c0] << op[1]->value.u[c1];
+
+ } else if (op[0]->type->base_type == GLSL_TYPE_UINT &&
+ op[1]->type->base_type == GLSL_TYPE_INT) {
+ data.u[c] = op[0]->value.u[c0] << op[1]->value.i[c1];
+
+ } else if (op[0]->type->base_type == GLSL_TYPE_UINT &&
+ op[1]->type->base_type == GLSL_TYPE_UINT) {
+ data.u[c] = op[0]->value.u[c0] << op[1]->value.u[c1];
+ }
+ }
+ break;
+
+ case ir_binop_rshift:
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ if (op[0]->type->base_type == GLSL_TYPE_INT &&
+ op[1]->type->base_type == GLSL_TYPE_INT) {
+ data.i[c] = op[0]->value.i[c0] >> op[1]->value.i[c1];
+
+ } else if (op[0]->type->base_type == GLSL_TYPE_INT &&
+ op[1]->type->base_type == GLSL_TYPE_UINT) {
+ data.i[c] = op[0]->value.i[c0] >> op[1]->value.u[c1];
+
+ } else if (op[0]->type->base_type == GLSL_TYPE_UINT &&
+ op[1]->type->base_type == GLSL_TYPE_INT) {
+ data.u[c] = op[0]->value.u[c0] >> op[1]->value.i[c1];
+
+ } else if (op[0]->type->base_type == GLSL_TYPE_UINT &&
+ op[1]->type->base_type == GLSL_TYPE_UINT) {
+ data.u[c] = op[0]->value.u[c0] >> op[1]->value.u[c1];
+ }
+ }
+ break;
+
+ case ir_binop_bit_and:
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] & op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] & op[1]->value.u[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_binop_bit_or:
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] | op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] | op[1]->value.u[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_binop_bit_xor:
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] ^ op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] ^ op[1]->value.u[c1];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ case ir_quadop_vector:
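+ /* Assemble a vector from the scalar constant operands, one per component. */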
+ for (unsigned c = 0; c < this->type->vector_elements; c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i[c] = op[c]->value.i[0];
+ break;
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[c]->value.u[0];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[c]->value.f[0];
+ break;
+ default:
+ assert(0);
+ }
+ }
+ break;
+
+ default:
+ /* FINISHME: Should handle all expression types. */
+ return NULL;
+ }
+
+ return new(ctx) ir_constant(this->type, &data);
+}
+
+
+ir_constant *
+ir_texture::constant_expression_value()
+{
+ /* texture lookups aren't constant expressions */
+ return NULL;
+}
+
+
+ir_constant *
+ir_swizzle::constant_expression_value()
+{
+ ir_constant *v = this->val->constant_expression_value();
+
+ if (v != NULL) {
+ ir_constant_data data = { { 0 } };
+
+ const unsigned swiz_idx[4] = {
+ this->mask.x, this->mask.y, this->mask.z, this->mask.w
+ };
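+ /* For example, swizzling a constant vec4(1,2,3,4) with .zyx selects
+ * source components {2, 1, 0} and yields vec3(3, 2, 1).
+ */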
+
+ for (unsigned i = 0; i < this->mask.num_components; i++) {
+ switch (v->type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT: data.u[i] = v->value.u[swiz_idx[i]]; break;
+ case GLSL_TYPE_FLOAT: data.f[i] = v->value.f[swiz_idx[i]]; break;
+ case GLSL_TYPE_BOOL: data.b[i] = v->value.b[swiz_idx[i]]; break;
+ default: assert(!"Should not get here."); break;
+ }
+ }
+
+ void *ctx = ralloc_parent(this);
+ return new(ctx) ir_constant(this->type, &data);
+ }
+ return NULL;
+}
+
+
+ir_constant *
+ir_dereference_variable::constant_expression_value()
+{
+ /* This can happen during compilation, when var->type is glsl_type::error_type. */
+ if (!var)
+ return NULL;
+
+ /* The constant_value of a uniform variable is its initializer,
+ * not the lifetime constant value of the uniform.
+ */
+ if (var->mode == ir_var_uniform)
+ return NULL;
+
+ if (!var->constant_value)
+ return NULL;
+
+ return var->constant_value->clone(ralloc_parent(var), NULL);
+}
+
+
+ir_constant *
+ir_dereference_array::constant_expression_value()
+{
+ ir_constant *array = this->array->constant_expression_value();
+ ir_constant *idx = this->array_index->constant_expression_value();
+
+ if ((array != NULL) && (idx != NULL)) {
+ void *ctx = ralloc_parent(this);
+ if (array->type->is_matrix()) {
+ /* Array access of a matrix results in a vector.
+ */
+ const unsigned column = idx->value.u[0];
+
+ const glsl_type *const column_type = array->type->column_type();
+
+ /* Offset in the constant matrix to the first element of the column
+ * to be extracted.
+ */
+ const unsigned mat_idx = column * column_type->vector_elements;
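+ /* e.g. for a mat3, column 1 occupies flat indices 3..5. */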
+
+ ir_constant_data data = { { 0 } };
+
+ switch (column_type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ for (unsigned i = 0; i < column_type->vector_elements; i++)
+ data.u[i] = array->value.u[mat_idx + i];
+
+ break;
+
+ case GLSL_TYPE_FLOAT:
+ for (unsigned i = 0; i < column_type->vector_elements; i++)
+ data.f[i] = array->value.f[mat_idx + i];
+
+ break;
+
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+
+ return new(ctx) ir_constant(column_type, &data);
+ } else if (array->type->is_vector()) {
+ const unsigned component = idx->value.u[0];
+
+ return new(ctx) ir_constant(array, component);
+ } else {
+ const unsigned index = idx->value.u[0];
+ return array->get_array_element(index)->clone(ctx, NULL);
+ }
+ }
+ return NULL;
+}
+
+
+ir_constant *
+ir_dereference_record::constant_expression_value()
+{
+ ir_constant *v = this->record->constant_expression_value();
+
+ return (v != NULL) ? v->get_record_field(this->field) : NULL;
+}
+
+
+ir_constant *
+ir_assignment::constant_expression_value()
+{
+ /* FINISHME: Handle CEs involving assignment (return RHS) */
+ return NULL;
+}
+
+
+ir_constant *
+ir_constant::constant_expression_value()
+{
+ return this;
+}
+
+
+ir_constant *
+ir_call::constant_expression_value()
+{
+ if (this->type == glsl_type::error_type)
+ return NULL;
+
+ /* From the GLSL 1.20 spec, page 23:
+ * "Function calls to user-defined functions (non-built-in functions)
+ * cannot be used to form constant expressions."
+ */
+ if (!this->callee->is_builtin)
+ return NULL;
+
+ unsigned num_parameters = 0;
+
+ /* Check if all parameters are constant */
+ ir_constant *op[3];
+ foreach_list(n, &this->actual_parameters) {
+ ir_constant *constant = ((ir_rvalue *) n)->constant_expression_value();
+ if (constant == NULL)
+ return NULL;
+
+ op[num_parameters] = constant;
+
+ assert(num_parameters < 3);
+ num_parameters++;
+ }
+
+ /* Individual cases below can either:
+ * - Assign "expr" a new ir_expression to evaluate (for basic opcodes)
+ * - Fill "data" with appopriate constant data
+ * - Return an ir_constant directly.
+ */
+ void *mem_ctx = ralloc_parent(this);
+ ir_expression *expr = NULL;
+
+ ir_constant_data data;
+ memset(&data, 0, sizeof(data));
+
+ const char *callee = this->callee_name();
+ if (strcmp(callee, "abs") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_abs, type, op[0], NULL);
+ } else if (strcmp(callee, "all") == 0) {
+ assert(op[0]->type->is_boolean());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ if (!op[0]->value.b[c])
+ return new(mem_ctx) ir_constant(false);
+ }
+ return new(mem_ctx) ir_constant(true);
+ } else if (strcmp(callee, "any") == 0) {
+ assert(op[0]->type->is_boolean());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ if (op[0]->value.b[c])
+ return new(mem_ctx) ir_constant(true);
+ }
+ return new(mem_ctx) ir_constant(false);
+ } else if (strcmp(callee, "acos") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = acosf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "acosh") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = acoshf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "asin") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = asinf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "asinh") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = asinhf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "atan") == 0) {
+ assert(op[0]->type->is_float());
+ if (num_parameters == 2) {
+ assert(op[1]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = atan2f(op[0]->value.f[c], op[1]->value.f[c]);
+ } else {
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = atanf(op[0]->value.f[c]);
+ }
+ } else if (strcmp(callee, "atanh") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = atanhf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "dFdx") == 0 || strcmp(callee, "dFdy") == 0) {
+ return ir_constant::zero(mem_ctx, this->type);
+ } else if (strcmp(callee, "ceil") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_ceil, type, op[0], NULL);
+ } else if (strcmp(callee, "clamp") == 0) {
+ assert(num_parameters == 3);
+ unsigned c1_inc = op[1]->type->is_scalar() ? 0 : 1;
+ unsigned c2_inc = op[2]->type->is_scalar() ? 0 : 1;
+ for (unsigned c = 0, c1 = 0, c2 = 0;
+ c < op[0]->type->components();
+ c1 += c1_inc, c2 += c2_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = CLAMP(op[0]->value.u[c], op[1]->value.u[c1],
+ op[2]->value.u[c2]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = CLAMP(op[0]->value.i[c], op[1]->value.i[c1],
+ op[2]->value.i[c2]);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = CLAMP(op[0]->value.f[c], op[1]->value.f[c1],
+ op[2]->value.f[c2]);
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "cos") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_cos, type, op[0], NULL);
+ } else if (strcmp(callee, "cosh") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = coshf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "cross") == 0) {
+ assert(op[0]->type == glsl_type::vec3_type);
+ assert(op[1]->type == glsl_type::vec3_type);
+ data.f[0] = (op[0]->value.f[1] * op[1]->value.f[2] -
+ op[1]->value.f[1] * op[0]->value.f[2]);
+ data.f[1] = (op[0]->value.f[2] * op[1]->value.f[0] -
+ op[1]->value.f[2] * op[0]->value.f[0]);
+ data.f[2] = (op[0]->value.f[0] * op[1]->value.f[1] -
+ op[1]->value.f[0] * op[0]->value.f[1]);
+ } else if (strcmp(callee, "degrees") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = 180.0F / M_PI * op[0]->value.f[c];
+ } else if (strcmp(callee, "distance") == 0) {
+ assert(op[0]->type->is_float() && op[1]->type->is_float());
+ float length_squared = 0.0;
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ float t = op[0]->value.f[c] - op[1]->value.f[c];
+ length_squared += t * t;
+ }
+ return new(mem_ctx) ir_constant(sqrtf(length_squared));
+ } else if (strcmp(callee, "dot") == 0) {
+ return new(mem_ctx) ir_constant(dot(op[0], op[1]));
+ } else if (strcmp(callee, "equal") == 0) {
+ assert(op[0]->type->is_vector() && op[1] && op[1]->type->is_vector());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] == op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] == op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] == op[1]->value.f[c];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] == op[1]->value.b[c];
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "exp") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_exp, type, op[0], NULL);
+ } else if (strcmp(callee, "exp2") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_exp2, type, op[0], NULL);
+ } else if (strcmp(callee, "faceforward") == 0) {
+ if (dot(op[2], op[1]) < 0)
+ return op[0];
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = -op[0]->value.f[c];
+ } else if (strcmp(callee, "floor") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_floor, type, op[0], NULL);
+ } else if (strcmp(callee, "fract") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_fract, type, op[0], NULL);
+ } else if (strcmp(callee, "fwidth") == 0) {
+ return ir_constant::zero(mem_ctx, this->type);
+ } else if (strcmp(callee, "greaterThan") == 0) {
+ assert(op[0]->type->is_vector() && op[1] && op[1]->type->is_vector());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] > op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] > op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] > op[1]->value.f[c];
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "greaterThanEqual") == 0) {
+ assert(op[0]->type->is_vector() && op[1] && op[1]->type->is_vector());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] >= op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] >= op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] >= op[1]->value.f[c];
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "inversesqrt") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_rsq, type, op[0], NULL);
+ } else if (strcmp(callee, "length") == 0) {
+ return new(mem_ctx) ir_constant(sqrtf(dot(op[0], op[0])));
+ } else if (strcmp(callee, "lessThan") == 0) {
+ assert(op[0]->type->is_vector() && op[1] && op[1]->type->is_vector());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] < op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] < op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] < op[1]->value.f[c];
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "lessThanEqual") == 0) {
+ assert(op[0]->type->is_vector() && op[1] && op[1]->type->is_vector());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] <= op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] <= op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] <= op[1]->value.f[c];
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "log") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_log, type, op[0], NULL);
+ } else if (strcmp(callee, "log2") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_log2, type, op[0], NULL);
+ } else if (strcmp(callee, "matrixCompMult") == 0) {
+ assert(op[0]->type->is_float() && op[1]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = op[0]->value.f[c] * op[1]->value.f[c];
+ } else if (strcmp(callee, "max") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_binop_max, type, op[0], op[1]);
+ } else if (strcmp(callee, "min") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_binop_min, type, op[0], op[1]);
+ } else if (strcmp(callee, "mix") == 0) {
+ assert(op[0]->type->is_float() && op[1]->type->is_float());
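+ /* mix(x, y, a) = x*(1-a) + y*a for a float 'a'; with a bvec 'a' each
+ * component selects between x and y.
+ */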
+ if (op[2]->type->is_float()) {
+ unsigned c2_inc = op[2]->type->is_scalar() ? 0 : 1;
+ unsigned components = op[0]->type->components();
+ for (unsigned c = 0, c2 = 0; c < components; c2 += c2_inc, c++) {
+ data.f[c] = op[0]->value.f[c] * (1 - op[2]->value.f[c2]) +
+ op[1]->value.f[c] * op[2]->value.f[c2];
+ }
+ } else {
+ assert(op[2]->type->is_boolean());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = op[op[2]->value.b[c] ? 1 : 0]->value.f[c];
+ }
+ } else if (strcmp(callee, "mod") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_binop_mod, type, op[0], op[1]);
+ } else if (strcmp(callee, "normalize") == 0) {
+ assert(op[0]->type->is_float());
+ float length = sqrtf(dot(op[0], op[0]));
+
+ if (length == 0)
+ return ir_constant::zero(mem_ctx, this->type);
+
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = op[0]->value.f[c] / length;
+ } else if (strcmp(callee, "not") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_logic_not, type, op[0], NULL);
+ } else if (strcmp(callee, "notEqual") == 0) {
+ assert(op[0]->type->is_vector() && op[1] && op[1]->type->is_vector());
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] != op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] != op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] != op[1]->value.f[c];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] != op[1]->value.b[c];
+ break;
+ default:
+ assert(!"Should not get here.");
+ }
+ }
+ } else if (strcmp(callee, "outerProduct") == 0) {
+ assert(op[0]->type->is_vector() && op[1]->type->is_vector());
+ const unsigned m = op[0]->type->vector_elements;
+ const unsigned n = op[1]->type->vector_elements;
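+ /* The result is an m-row by n-column matrix stored column-major:
+ * element (i, j) = op[0][i] * op[1][j] at flat index i + m*j.
+ */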
+ for (unsigned j = 0; j < n; j++) {
+ for (unsigned i = 0; i < m; i++) {
+ data.f[i+m*j] = op[0]->value.f[i] * op[1]->value.f[j];
+ }
+ }
+ } else if (strcmp(callee, "pow") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_binop_pow, type, op[0], op[1]);
+ } else if (strcmp(callee, "radians") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = M_PI / 180.0F * op[0]->value.f[c];
+ } else if (strcmp(callee, "reflect") == 0) {
+ assert(op[0]->type->is_float());
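+ /* reflect(I, N) = I - 2 * dot(N, I) * N */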
+ float dot_NI = dot(op[1], op[0]);
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = op[0]->value.f[c] - 2 * dot_NI * op[1]->value.f[c];
+ } else if (strcmp(callee, "refract") == 0) {
+ const float eta = op[2]->value.f[0];
+ const float dot_NI = dot(op[1], op[0]);
+ const float k = 1.0F - eta * eta * (1.0F - dot_NI * dot_NI);
+ if (k < 0.0) {
+ return ir_constant::zero(mem_ctx, this->type);
+ } else {
+ for (unsigned c = 0; c < type->components(); c++) {
+ data.f[c] = eta * op[0]->value.f[c] - (eta * dot_NI + sqrtf(k))
+ * op[1]->value.f[c];
+ }
+ }
+ } else if (strcmp(callee, "sign") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_sign, type, op[0], NULL);
+ } else if (strcmp(callee, "sin") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_sin, type, op[0], NULL);
+ } else if (strcmp(callee, "sinh") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = sinhf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "smoothstep") == 0) {
+ assert(num_parameters == 3);
+ assert(op[1]->type == op[0]->type);
+ unsigned edge_inc = op[0]->type->is_scalar() ? 0 : 1;
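+ /* smoothstep(edge0, edge1, x): t = clamp((x - edge0) / (edge1 - edge0), 0, 1),
+ * then Hermite interpolation t*t*(3 - 2*t).
+ */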
+ for (unsigned c = 0, e = 0; c < type->components(); e += edge_inc, c++) {
+ const float edge0 = op[0]->value.f[e];
+ const float edge1 = op[1]->value.f[e];
+ if (edge0 == edge1) {
+ data.f[c] = 0.0; /* Avoid a crash - results are undefined anyway */
+ } else {
+ const float numerator = op[2]->value.f[c] - edge0;
+ const float denominator = edge1 - edge0;
+ const float t = CLAMP(numerator/denominator, 0, 1);
+ data.f[c] = t * t * (3 - 2 * t);
+ }
+ }
+ } else if (strcmp(callee, "sqrt") == 0) {
+ expr = new(mem_ctx) ir_expression(ir_unop_sqrt, type, op[0], NULL);
+ } else if (strcmp(callee, "step") == 0) {
+ assert(op[0]->type->is_float() && op[1]->type->is_float());
+ /* op[0] (edge) may be either a scalar or a vector */
+ const unsigned c0_inc = op[0]->type->is_scalar() ? 0 : 1;
+ for (unsigned c = 0, c0 = 0; c < type->components(); c0 += c0_inc, c++)
+ data.f[c] = (op[1]->value.f[c] < op[0]->value.f[c0]) ? 0.0F : 1.0F;
+ } else if (strcmp(callee, "tan") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = tanf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "tanh") == 0) {
+ assert(op[0]->type->is_float());
+ for (unsigned c = 0; c < op[0]->type->components(); c++)
+ data.f[c] = tanhf(op[0]->value.f[c]);
+ } else if (strcmp(callee, "transpose") == 0) {
+ assert(op[0]->type->is_matrix());
+ const unsigned n = op[0]->type->vector_elements;
+ const unsigned m = op[0]->type->matrix_columns;
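+ /* Input element (row i, column j) at flat index i + n*j is written to
+ * (row j, column i) of the m-row result at flat index m*i + j.
+ */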
+ for (unsigned j = 0; j < m; j++) {
+ for (unsigned i = 0; i < n; i++) {
+ data.f[m*i+j] += op[0]->value.f[i+n*j];
+ }
+ }
+ } else {
+ /* Unsupported builtin - some are not allowed in constant expressions. */
+ return NULL;
+ }
+
+ if (expr != NULL)
+ return expr->constant_expression_value();
+
+ return new(mem_ctx) ir_constant(this->type, &data);
+}
diff --git a/mesalib/src/glsl/linker.cpp b/mesalib/src/glsl/linker.cpp
index 46cd1950c..a9a2a5e20 100644
--- a/mesalib/src/glsl/linker.cpp
+++ b/mesalib/src/glsl/linker.cpp
@@ -1,1713 +1,1713 @@
[The removed ("-") side of this hunk is the previous 1713-line linker.cpp: the GLSL linker implementation (cross-validation of globals and uniforms across shaders, intrastage shader combining, and uniform/attribute location assignment). Its text was flattened during extraction and is omitted here; the matching "+" side re-adds 1713 lines for this file.]
- */ - find_deref_visitor find("gl_Vertex"); - find.run(sh->ir); - if (find.variable_found()) - used_locations |= (1 << 0); - - for (unsigned i = 0; i < num_attr; i++) { - /* Mask representing the contiguous slots that will be used by this - * attribute. - */ - const unsigned use_mask = (1 << to_assign[i].slots) - 1; - - int location = find_available_slots(used_locations, to_assign[i].slots); - - if (location < 0) { - linker_error_printf(prog, - "insufficient contiguous attribute locations " - "available for vertex shader input `%s'", - to_assign[i].var->name); - return false; - } - - to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location; - used_locations |= (use_mask << location); - } - - return true; -} - - -/** - * Demote shader inputs and outputs that are not used in other stages - */ -void -demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode) -{ - foreach_list(node, sh->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); - - if ((var == NULL) || (var->mode != int(mode))) - continue; - - /* A shader 'in' or 'out' variable is only really an input or output if - * its value is used by other shader stages. This will cause the variable - * to have a location assigned. - */ - if (var->location == -1) { - var->mode = ir_var_auto; - } - } -} - - -void -assign_varying_locations(struct gl_shader_program *prog, - gl_shader *producer, gl_shader *consumer) -{ - /* FINISHME: Set dynamically when geometry shader support is added. */ - unsigned output_index = VERT_RESULT_VAR0; - unsigned input_index = FRAG_ATTRIB_VAR0; - - /* Operate in a total of three passes. - * - * 1. Assign locations for any matching inputs and outputs. - * - * 2. Mark output variables in the producer that do not have locations as - * not being outputs. This lets the optimizer eliminate them. - * - * 3. Mark input variables in the consumer that do not have locations as - * not being inputs. This lets the optimizer eliminate them. - */ - - invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0); - invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0); - - foreach_list(node, producer->ir) { - ir_variable *const output_var = ((ir_instruction *) node)->as_variable(); - - if ((output_var == NULL) || (output_var->mode != ir_var_out) - || (output_var->location != -1)) - continue; - - ir_variable *const input_var = - consumer->symbols->get_variable(output_var->name); - - if ((input_var == NULL) || (input_var->mode != ir_var_in)) - continue; - - assert(input_var->location == -1); - - output_var->location = output_index; - input_var->location = input_index; - - /* FINISHME: Support for "varying" records in GLSL 1.50. */ - assert(!output_var->type->is_record()); - - if (output_var->type->is_array()) { - const unsigned slots = output_var->type->length - * output_var->type->fields.array->matrix_columns; - - output_index += slots; - input_index += slots; - } else { - const unsigned slots = output_var->type->matrix_columns; - - output_index += slots; - input_index += slots; - } - } - - foreach_list(node, consumer->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); - - if ((var == NULL) || (var->mode != ir_var_in)) - continue; - - if (var->location == -1) { - if (prog->Version <= 120) { - /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec: - * - * Only those varying variables used (i.e. 
read) in - * the fragment shader executable must be written to - * by the vertex shader executable; declaring - * superfluous varying variables in a vertex shader is - * permissible. - * - * We interpret this text as meaning that the VS must - * write the variable for the FS to read it. See - * "glsl1-varying read but not written" in piglit. - */ - - linker_error_printf(prog, "fragment shader varying %s not written " - "by vertex shader\n.", var->name); - prog->LinkStatus = false; - } - - /* An 'in' variable is only really a shader input if its - * value is written by the previous stage. - */ - var->mode = ir_var_auto; - } - } -} - - -void -link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) -{ - void *mem_ctx = ralloc_context(NULL); // temporary linker context - - prog->LinkStatus = false; - prog->Validated = false; - prog->_Used = false; - - if (prog->InfoLog != NULL) - ralloc_free(prog->InfoLog); - - prog->InfoLog = ralloc_strdup(NULL, ""); - - /* Separate the shaders into groups based on their type. - */ - struct gl_shader **vert_shader_list; - unsigned num_vert_shaders = 0; - struct gl_shader **frag_shader_list; - unsigned num_frag_shaders = 0; - - vert_shader_list = (struct gl_shader **) - calloc(2 * prog->NumShaders, sizeof(struct gl_shader *)); - frag_shader_list = &vert_shader_list[prog->NumShaders]; - - unsigned min_version = UINT_MAX; - unsigned max_version = 0; - for (unsigned i = 0; i < prog->NumShaders; i++) { - min_version = MIN2(min_version, prog->Shaders[i]->Version); - max_version = MAX2(max_version, prog->Shaders[i]->Version); - - switch (prog->Shaders[i]->Type) { - case GL_VERTEX_SHADER: - vert_shader_list[num_vert_shaders] = prog->Shaders[i]; - num_vert_shaders++; - break; - case GL_FRAGMENT_SHADER: - frag_shader_list[num_frag_shaders] = prog->Shaders[i]; - num_frag_shaders++; - break; - case GL_GEOMETRY_SHADER: - /* FINISHME: Support geometry shaders. */ - assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER); - break; - } - } - - /* Previous to GLSL version 1.30, different compilation units could mix and - * match shading language versions. With GLSL 1.30 and later, the versions - * of all shaders must match. - */ - assert(min_version >= 100); - assert(max_version <= 130); - if ((max_version >= 130 || min_version == 100) - && min_version != max_version) { - linker_error_printf(prog, "all shaders must use same shading " - "language version\n"); - goto done; - } - - prog->Version = max_version; - - for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] != NULL) - ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]); - - prog->_LinkedShaders[i] = NULL; - } - - /* Link all shaders for a particular stage and validate the result. - */ - if (num_vert_shaders > 0) { - gl_shader *const sh = - link_intrastage_shaders(mem_ctx, ctx, prog, vert_shader_list, - num_vert_shaders); - - if (sh == NULL) - goto done; - - if (!validate_vertex_shader_executable(prog, sh)) - goto done; - - _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_VERTEX], - sh); - } - - if (num_frag_shaders > 0) { - gl_shader *const sh = - link_intrastage_shaders(mem_ctx, ctx, prog, frag_shader_list, - num_frag_shaders); - - if (sh == NULL) - goto done; - - if (!validate_fragment_shader_executable(prog, sh)) - goto done; - - _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_FRAGMENT], - sh); - } - - /* Here begins the inter-stage linking phase. 
Some initial validation is - * performed, then locations are assigned for uniforms, attributes, and - * varyings. - */ - if (cross_validate_uniforms(prog)) { - unsigned prev; - - for (prev = 0; prev < MESA_SHADER_TYPES; prev++) { - if (prog->_LinkedShaders[prev] != NULL) - break; - } - - /* Validate the inputs of each stage with the output of the preceeding - * stage. - */ - for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) - continue; - - if (!cross_validate_outputs_to_inputs(prog, - prog->_LinkedShaders[prev], - prog->_LinkedShaders[i])) - goto done; - - prev = i; - } - - prog->LinkStatus = true; - } - - /* Do common optimization before assigning storage for attributes, - * uniforms, and varyings. Later optimization could possibly make - * some of that unused. - */ - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) - continue; - - while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, 32)) - ; - } - - update_array_sizes(prog); - - assign_uniform_locations(prog); - - if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) { - /* FINISHME: The value of the max_attribute_index parameter is - * FINISHME: implementation dependent based on the value of - * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be - * FINISHME: at least 16, so hardcode 16 for now. - */ - if (!assign_attribute_locations(prog, 16)) { - prog->LinkStatus = false; - goto done; - } - } - - unsigned prev; - for (prev = 0; prev < MESA_SHADER_TYPES; prev++) { - if (prog->_LinkedShaders[prev] != NULL) - break; - } - - for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) - continue; - - assign_varying_locations(prog, - prog->_LinkedShaders[prev], - prog->_LinkedShaders[i]); - prev = i; - } - - if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) { - demote_shader_inputs_and_outputs(prog->_LinkedShaders[MESA_SHADER_VERTEX], - ir_var_out); - } - - if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) { - gl_shader *const sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY]; - - demote_shader_inputs_and_outputs(sh, ir_var_in); - demote_shader_inputs_and_outputs(sh, ir_var_inout); - demote_shader_inputs_and_outputs(sh, ir_var_out); - } - - if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) { - gl_shader *const sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]; - - demote_shader_inputs_and_outputs(sh, ir_var_in); - } - - /* OpenGL ES requires that a vertex shader and a fragment shader both be - * present in a linked program. By checking for use of shading language - * version 1.00, we also catch the GL_ARB_ES2_compatibility case. - */ - if (ctx->API == API_OPENGLES2 || prog->Version == 100) { - if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) { - linker_error_printf(prog, "program lacks a vertex shader\n"); - prog->LinkStatus = false; - } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) { - linker_error_printf(prog, "program lacks a fragment shader\n"); - prog->LinkStatus = false; - } - } - - /* FINISHME: Assign fragment shader output locations. */ - -done: - free(vert_shader_list); - - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) - continue; - - /* Retain any live IR, but trash the rest. */ - reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir); - } - - ralloc_free(mem_ctx); -} +/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file linker.cpp
+ * GLSL linker implementation
+ *
+ * Given a set of shaders that are to be linked to generate a final program,
+ * there are three distinct stages.
+ *
+ * In the first stage shaders are partitioned into groups based on the shader
+ * type. All shaders of a particular type (e.g., vertex shaders) are linked
+ * together.
+ *
+ * - Undefined references in each shader are resolved to definitions in
+ * another shader.
+ * - Types and qualifiers of uniforms, outputs, and global variables defined
+ * in multiple shaders with the same name are verified to be the same.
+ * - Initializers for uniforms and global variables defined
+ * in multiple shaders with the same name are verified to be the same.
+ *
+ * The result, in the terminology of the GLSL spec, is a set of shader
+ * executables for each processing unit.
+ *
+ * After the first stage is complete, a series of semantic checks is performed
+ * on each of the shader executables.
+ *
+ * - Each shader executable must define a \c main function.
+ * - Each vertex shader executable must write to \c gl_Position.
+ * - Each fragment shader executable must write to either \c gl_FragData or
+ * \c gl_FragColor.
+ *
+ * In the final stage individual shader executables are linked to create a
+ * complete executable.
+ *
+ * - Types of uniforms defined in multiple shader stages with the same name
+ * are verified to be the same.
+ * - Initializers for uniforms defined in multiple shader stages with the
+ * same name are verified to be the same.
+ * - Types and qualifiers of outputs defined in one stage are verified to
+ * be the same as the types and qualifiers of inputs defined with the same
+ * name in a later stage.
+ *
+ * \author Ian Romanick <ian.d.romanick@intel.com>
+ */
+#include <cstdlib>
+#include <cstdio>
+#include <cstdarg>
+#include <climits>
+
+#include "main/core.h"
+#include "glsl_symbol_table.h"
+#include "ir.h"
+#include "program.h"
+#include "program/hash_table.h"
+#include "linker.h"
+#include "ir_optimization.h"
+
+extern "C" {
+#include "main/shaderobj.h"
+}
+
+/**
+ * Visitor that determines whether or not a variable is ever written.
+ */
+class find_assignment_visitor : public ir_hierarchical_visitor {
+public:
+ find_assignment_visitor(const char *name)
+ : name(name), found(false)
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *ir)
+ {
+ ir_variable *const var = ir->lhs->variable_referenced();
+
+ if (strcmp(name, var->name) == 0) {
+ found = true;
+ return visit_stop;
+ }
+
+ return visit_continue_with_parent;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_call *ir)
+ {
+ exec_list_iterator sig_iter = ir->get_callee()->parameters.iterator();
+ foreach_iter(exec_list_iterator, iter, *ir) {
+ ir_rvalue *param_rval = (ir_rvalue *)iter.get();
+ ir_variable *sig_param = (ir_variable *)sig_iter.get();
+
+ if (sig_param->mode == ir_var_out ||
+ sig_param->mode == ir_var_inout) {
+ ir_variable *var = param_rval->variable_referenced();
+ if (var && strcmp(name, var->name) == 0) {
+ found = true;
+ return visit_stop;
+ }
+ }
+ sig_iter.next();
+ }
+
+ return visit_continue_with_parent;
+ }
+
+ bool variable_found()
+ {
+ return found;
+ }
+
+private:
+ const char *name; /**< Find writes to a variable with this name. */
+ bool found; /**< Was a write to the variable found? */
+};
+
+
+/**
+ * Visitor that determines whether or not a variable is ever read.
+ */
+class find_deref_visitor : public ir_hierarchical_visitor {
+public:
+ find_deref_visitor(const char *name)
+ : name(name), found(false)
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (strcmp(this->name, ir->var->name) == 0) {
+ this->found = true;
+ return visit_stop;
+ }
+
+ return visit_continue;
+ }
+
+ bool variable_found() const
+ {
+ return this->found;
+ }
+
+private:
+ const char *name; /**< Find dereferences of a variable with this name. */
+ bool found; /**< Was a dereference of the variable found? */
+};
+
+
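+/**
+ * Append a formatted error message to the program's info log
+ *
+ * The message is prefixed with "error: " and formatted from the printf-style
+ * format string \c fmt.
+ */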
+void
+linker_error_printf(gl_shader_program *prog, const char *fmt, ...)
+{
+ va_list ap;
+
+ ralloc_strcat(&prog->InfoLog, "error: ");
+ va_start(ap, fmt);
+ ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
+ va_end(ap);
+}
+
+
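+/**
+ * Reset the locations of generic variables of a particular mode
+ *
+ * Walks the shader's IR and sets \c location back to -1 for every variable of
+ * the given mode whose current location is at or above \c generic_base,
+ * unless the location was set explicitly in the shader source.
+ */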
+void
+invalidate_variable_locations(gl_shader *sh, enum ir_variable_mode mode,
+ int generic_base)
+{
+ foreach_list(node, sh->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != (unsigned) mode))
+ continue;
+
+ /* Only assign locations for generic attributes / varyings / etc.
+ */
+ if ((var->location >= generic_base) && !var->explicit_location)
+ var->location = -1;
+ }
+}
+
+
+/**
+ * Determine the number of attribute slots required for a particular type
+ *
+ * This code is here because it implements the language rules of a specific
+ * GLSL version. Since it's a property of the language and not a property of
+ * types in general, it doesn't really belong in glsl_type.
+ */
+unsigned
+count_attribute_slots(const glsl_type *t)
+{
+ /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "A scalar input counts the same amount against this limit as a vec4,
+ * so applications may want to consider packing groups of four
+ * unrelated float inputs together into a vector to better utilize the
+ * capabilities of the underlying hardware. A matrix input will use up
+ * multiple locations. The number of locations used will equal the
+ * number of columns in the matrix."
+ *
+ * The spec does not explicitly say how arrays are counted. However, it
+ * should be safe to assume the total number of slots consumed by an array
+ * is the number of entries in the array multiplied by the number of slots
+ * consumed by a single element of the array.
+ */
+
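+   /* Illustrative examples of the resulting counts: a float or vec4 input
+    * uses 1 slot, a mat4 uses 4 slots (one per column), and a mat2[3] array
+    * uses 3 * 2 = 6 slots.
+    */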
+ if (t->is_array())
+ return t->array_size() * count_attribute_slots(t->element_type());
+
+ if (t->is_matrix())
+ return t->matrix_columns;
+
+ return 1;
+}
+
+
+/**
+ * Verify that a vertex shader executable meets all semantic requirements
+ *
+ * \param shader Vertex shader executable to be verified
+ */
+bool
+validate_vertex_shader_executable(struct gl_shader_program *prog,
+ struct gl_shader *shader)
+{
+ if (shader == NULL)
+ return true;
+
+ find_assignment_visitor find("gl_Position");
+ find.run(shader->ir);
+ if (!find.variable_found()) {
+ linker_error_printf(prog,
+ "vertex shader does not write to `gl_Position'\n");
+ return false;
+ }
+
+ return true;
+}
+
+
+/**
+ * Verify that a fragment shader executable meets all semantic requirements
+ *
+ * \param shader Fragment shader executable to be verified
+ */
+bool
+validate_fragment_shader_executable(struct gl_shader_program *prog,
+ struct gl_shader *shader)
+{
+ if (shader == NULL)
+ return true;
+
+ find_assignment_visitor frag_color("gl_FragColor");
+ find_assignment_visitor frag_data("gl_FragData");
+
+ frag_color.run(shader->ir);
+ frag_data.run(shader->ir);
+
+ if (frag_color.variable_found() && frag_data.variable_found()) {
+ linker_error_printf(prog, "fragment shader writes to both "
+ "`gl_FragColor' and `gl_FragData'\n");
+ return false;
+ }
+
+ return true;
+}
+
+
+/**
+ * Generate a string describing the mode of a variable
+ */
+static const char *
+mode_string(const ir_variable *var)
+{
+ switch (var->mode) {
+ case ir_var_auto:
+ return (var->read_only) ? "global constant" : "global variable";
+
+ case ir_var_uniform: return "uniform";
+ case ir_var_in: return "shader input";
+ case ir_var_out: return "shader output";
+ case ir_var_inout: return "shader inout";
+
+ case ir_var_const_in:
+ case ir_var_temporary:
+ default:
+ assert(!"Should not get here.");
+ return "invalid variable";
+ }
+}
+
+
+/**
+ * Perform validation of global variables used across multiple shaders
+ */
+bool
+cross_validate_globals(struct gl_shader_program *prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders,
+ bool uniforms_only)
+{
+ /* Examine all of the uniforms in all of the shaders and cross validate
+ * them.
+ */
+ glsl_symbol_table variables;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == NULL)
+ continue;
+
+ foreach_list(node, shader_list[i]->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if (var == NULL)
+ continue;
+
+ if (uniforms_only && (var->mode != ir_var_uniform))
+ continue;
+
+ /* Don't cross validate temporaries that are at global scope. These
+      * will eventually get pulled into the shader's 'main'.
+ */
+ if (var->mode == ir_var_temporary)
+ continue;
+
+ /* If a global with this name has already been seen, verify that the
+ * new instance has the same type. In addition, if the globals have
+ * initializers, the values of the initializers must be the same.
+ */
+ ir_variable *const existing = variables.get_variable(var->name);
+ if (existing != NULL) {
+ if (var->type != existing->type) {
+ /* Consider the types to be "the same" if both types are arrays
+ * of the same type and one of the arrays is implicitly sized.
+ * In addition, set the type of the linked variable to the
+ * explicitly sized array.
+ */
+ if (var->type->is_array()
+ && existing->type->is_array()
+ && (var->type->fields.array == existing->type->fields.array)
+ && ((var->type->length == 0)
+ || (existing->type->length == 0))) {
+ if (var->type->length != 0) {
+ existing->type = var->type;
+ }
+ } else {
+ linker_error_printf(prog, "%s `%s' declared as type "
+ "`%s' and type `%s'\n",
+ mode_string(var),
+ var->name, var->type->name,
+ existing->type->name);
+ return false;
+ }
+ }
+
+ if (var->explicit_location) {
+ if (existing->explicit_location
+ && (var->location != existing->location)) {
+ linker_error_printf(prog, "explicit locations for %s "
+ "`%s' have differing values\n",
+ mode_string(var), var->name);
+ return false;
+ }
+
+ existing->location = var->location;
+ existing->explicit_location = true;
+ }
+
+ /* Validate layout qualifiers for gl_FragDepth.
+ *
+ * From the AMD_conservative_depth spec:
+ * "If gl_FragDepth is redeclared in any fragment shader in
+ * a program, it must be redeclared in all fragment shaders in that
+ * program that have static assignments to gl_FragDepth. All
+ * redeclarations of gl_FragDepth in all fragment shaders in
+ * a single program must have the same set of qualifiers."
+ */
+ if (strcmp(var->name, "gl_FragDepth") == 0) {
+ bool layout_declared = var->depth_layout != ir_depth_layout_none;
+ bool layout_differs = var->depth_layout != existing->depth_layout;
+ if (layout_declared && layout_differs) {
+ linker_error_printf(prog,
+ "All redeclarations of gl_FragDepth in all fragment shaders "
+ "in a single program must have the same set of qualifiers.");
+ }
+ if (var->used && layout_differs) {
+ linker_error_printf(prog,
+	       "If gl_FragDepth is redeclared with a layout qualifier in "
+	       "any fragment shader, it must be redeclared with the same "
+	       "layout qualifier in all fragment shaders that have "
+ "assignments to gl_FragDepth");
+ }
+ }
+
+ /* FINISHME: Handle non-constant initializers.
+ */
+ if (var->constant_value != NULL) {
+ if (existing->constant_value != NULL) {
+ if (!var->constant_value->has_value(existing->constant_value)) {
+ linker_error_printf(prog, "initializers for %s "
+ "`%s' have differing values\n",
+ mode_string(var), var->name);
+ return false;
+ }
+ } else
+ /* If the first-seen instance of a particular uniform did not
+ * have an initializer but a later instance does, copy the
+ * initializer to the version stored in the symbol table.
+ */
+ /* FINISHME: This is wrong. The constant_value field should
+ * FINISHME: not be modified! Imagine a case where a shader
+ * FINISHME: without an initializer is linked in two different
+ * FINISHME: programs with shaders that have differing
+ * FINISHME: initializers. Linking with the first will
+ * FINISHME: modify the shader, and linking with the second
+ * FINISHME: will fail.
+ */
+ existing->constant_value =
+ var->constant_value->clone(ralloc_parent(existing), NULL);
+ }
+
+ if (existing->invariant != var->invariant) {
+ linker_error_printf(prog, "declarations for %s `%s' have "
+ "mismatching invariant qualifiers\n",
+ mode_string(var), var->name);
+ return false;
+ }
+ if (existing->centroid != var->centroid) {
+ linker_error_printf(prog, "declarations for %s `%s' have "
+ "mismatching centroid qualifiers\n",
+ mode_string(var), var->name);
+ return false;
+ }
+ } else
+ variables.add_variable(var);
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Perform validation of uniforms used across multiple shader stages
+ */
+bool
+cross_validate_uniforms(struct gl_shader_program *prog)
+{
+ return cross_validate_globals(prog, prog->_LinkedShaders,
+ MESA_SHADER_TYPES, true);
+}
+
+
+/**
+ * Validate that outputs from one stage match inputs of another
+ */
+bool
+cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
+ gl_shader *producer, gl_shader *consumer)
+{
+ glsl_symbol_table parameters;
+ /* FINISHME: Figure these out dynamically. */
+ const char *const producer_stage = "vertex";
+ const char *const consumer_stage = "fragment";
+
+ /* Find all shader outputs in the "producer" stage.
+ */
+ foreach_list(node, producer->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ /* FINISHME: For geometry shaders, this should also look for inout
+ * FINISHME: variables.
+ */
+ if ((var == NULL) || (var->mode != ir_var_out))
+ continue;
+
+ parameters.add_variable(var);
+ }
+
+
+ /* Find all shader inputs in the "consumer" stage. Any variables that have
+ * matching outputs already in the symbol table must have the same type and
+ * qualifiers.
+ */
+ foreach_list(node, consumer->ir) {
+ ir_variable *const input = ((ir_instruction *) node)->as_variable();
+
+ /* FINISHME: For geometry shaders, this should also look for inout
+ * FINISHME: variables.
+ */
+ if ((input == NULL) || (input->mode != ir_var_in))
+ continue;
+
+ ir_variable *const output = parameters.get_variable(input->name);
+ if (output != NULL) {
+ /* Check that the types match between stages.
+ */
+ if (input->type != output->type) {
+ /* There is a bit of a special case for gl_TexCoord. This
+	     * built-in is unsized by default.  Applications that access it with
+	     * a variable index must redeclare it with a size.  There is some
+ * language in the GLSL spec that implies the fragment shader
+ * and vertex shader do not have to agree on this size. Other
+	     * drivers behave this way, and one or two applications seem to
+ * rely on it.
+ *
+ * Neither declaration needs to be modified here because the array
+ * sizes are fixed later when update_array_sizes is called.
+ *
+ * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "Unlike user-defined varying variables, the built-in
+ * varying variables don't have a strict one-to-one
+ * correspondence between the vertex language and the
+ * fragment language."
+ */
+ if (!output->type->is_array()
+ || (strncmp("gl_", output->name, 3) != 0)) {
+ linker_error_printf(prog,
+ "%s shader output `%s' declared as "
+ "type `%s', but %s shader input declared "
+ "as type `%s'\n",
+ producer_stage, output->name,
+ output->type->name,
+ consumer_stage, input->type->name);
+ return false;
+ }
+ }
+
+ /* Check that all of the qualifiers match between stages.
+ */
+ if (input->centroid != output->centroid) {
+ linker_error_printf(prog,
+ "%s shader output `%s' %s centroid qualifier, "
+ "but %s shader input %s centroid qualifier\n",
+ producer_stage,
+ output->name,
+ (output->centroid) ? "has" : "lacks",
+ consumer_stage,
+ (input->centroid) ? "has" : "lacks");
+ return false;
+ }
+
+ if (input->invariant != output->invariant) {
+ linker_error_printf(prog,
+ "%s shader output `%s' %s invariant qualifier, "
+ "but %s shader input %s invariant qualifier\n",
+ producer_stage,
+ output->name,
+ (output->invariant) ? "has" : "lacks",
+ consumer_stage,
+ (input->invariant) ? "has" : "lacks");
+ return false;
+ }
+
+ if (input->interpolation != output->interpolation) {
+ linker_error_printf(prog,
+ "%s shader output `%s' specifies %s "
+ "interpolation qualifier, "
+ "but %s shader input specifies %s "
+ "interpolation qualifier\n",
+ producer_stage,
+ output->name,
+ output->interpolation_string(),
+ consumer_stage,
+ input->interpolation_string());
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Populates a shaders symbol table with all global declarations
+ */
+static void
+populate_symbol_table(gl_shader *sh)
+{
+ sh->symbols = new(sh) glsl_symbol_table;
+
+ foreach_list(node, sh->ir) {
+ ir_instruction *const inst = (ir_instruction *) node;
+ ir_variable *var;
+ ir_function *func;
+
+ if ((func = inst->as_function()) != NULL) {
+ sh->symbols->add_function(func);
+ } else if ((var = inst->as_variable()) != NULL) {
+ sh->symbols->add_variable(var);
+ }
+ }
+}
+
+
+/**
+ * Remap variables referenced in an instruction tree
+ *
+ * This is used when instruction trees are cloned from one shader and placed in
+ * another. These trees will contain references to \c ir_variable nodes that
+ * do not exist in the target shader. This function finds these \c ir_variable
+ * references and replaces the references with matching variables in the target
+ * shader.
+ *
+ * If there is no matching variable in the target shader, a clone of the
+ * \c ir_variable is made and added to the target shader. The new variable is
+ * added to \b both the instruction stream and the symbol table.
+ *
+ * \param inst IR tree that is to be processed.
+ * \param symbols Symbol table containing global scope symbols in the
+ * linked shader.
+ * \param instructions Instruction stream where new variable declarations
+ * should be added.
+ */
+void
+remap_variables(ir_instruction *inst, struct gl_shader *target,
+ hash_table *temps)
+{
+ class remap_visitor : public ir_hierarchical_visitor {
+ public:
+ remap_visitor(struct gl_shader *target,
+ hash_table *temps)
+ {
+ this->target = target;
+ this->symbols = target->symbols;
+ this->instructions = target->ir;
+ this->temps = temps;
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (ir->var->mode == ir_var_temporary) {
+ ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
+
+ assert(var != NULL);
+ ir->var = var;
+ return visit_continue;
+ }
+
+ ir_variable *const existing =
+ this->symbols->get_variable(ir->var->name);
+ if (existing != NULL)
+ ir->var = existing;
+ else {
+ ir_variable *copy = ir->var->clone(this->target, NULL);
+
+ this->symbols->add_variable(copy);
+ this->instructions->push_head(copy);
+ ir->var = copy;
+ }
+
+ return visit_continue;
+ }
+
+ private:
+ struct gl_shader *target;
+ glsl_symbol_table *symbols;
+ exec_list *instructions;
+ hash_table *temps;
+ };
+
+ remap_visitor v(target, temps);
+
+ inst->accept(&v);
+}
+
+
+/**
+ * Move non-declarations from one instruction stream to another
+ *
+ * The intended usage pattern of this function is to pass the pointer to the
+ * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
+ * pointer) for \c last and \c false for \c make_copies on the first
+ * call. Successive calls pass the return value of the previous call for
+ * \c last and \c true for \c make_copies.
+ *
+ * \param instructions Source instruction stream
+ * \param last Instruction after which new instructions should be
+ * inserted in the target instruction stream
+ * \param make_copies Flag selecting whether instructions in \c instructions
+ * should be copied (via \c ir_instruction::clone) into the
+ * target list or moved.
+ *
+ * \return
+ * The new "last" instruction in the target instruction stream. This pointer
+ * is suitable for use as the \c last parameter of a later call to this
+ * function.
+ */
+exec_node *
+move_non_declarations(exec_list *instructions, exec_node *last,
+ bool make_copies, gl_shader *target)
+{
+ hash_table *temps = NULL;
+
+ if (make_copies)
+ temps = hash_table_ctor(0, hash_table_pointer_hash,
+ hash_table_pointer_compare);
+
+ foreach_list_safe(node, instructions) {
+ ir_instruction *inst = (ir_instruction *) node;
+
+ if (inst->as_function())
+ continue;
+
+ ir_variable *var = inst->as_variable();
+ if ((var != NULL) && (var->mode != ir_var_temporary))
+ continue;
+
+ assert(inst->as_assignment()
+ || ((var != NULL) && (var->mode == ir_var_temporary)));
+
+ if (make_copies) {
+ inst = inst->clone(target, NULL);
+
+ if (var != NULL)
+ hash_table_insert(temps, inst, var);
+ else
+ remap_variables(inst, target, temps);
+ } else {
+ inst->remove();
+ }
+
+ last->insert_after(inst);
+ last = inst;
+ }
+
+ if (make_copies)
+ hash_table_dtor(temps);
+
+ return last;
+}
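+
+/* Typical calling sequence for move_non_declarations (illustrative sketch;
+ * the real use is in link_intrastage_shaders() below).  main_shader and
+ * other_shader are placeholder names for the shader that defines main and
+ * any additional shader being merged into the linked result:
+ *
+ *    exec_node *pos = move_non_declarations(main_shader->ir,
+ *                                           (exec_node *) &main_sig->body,
+ *                                           false, linked);
+ *    pos = move_non_declarations(other_shader->ir, pos, true, linked);
+ */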
+
+/**
+ * Get the function signature for main from a shader
+ */
+static ir_function_signature *
+get_main_function_signature(gl_shader *sh)
+{
+ ir_function *const f = sh->symbols->get_function("main");
+ if (f != NULL) {
+ exec_list void_parameters;
+
+ /* Look for the 'void main()' signature and ensure that it's defined.
+    * This keeps the linker from accidentally picking a shader that just
+ * contains a prototype for main.
+ *
+ * We don't have to check for multiple definitions of main (in multiple
+ * shaders) because that would have already been caught above.
+ */
+ ir_function_signature *sig = f->matching_signature(&void_parameters);
+ if ((sig != NULL) && sig->is_defined) {
+ return sig;
+ }
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Combine a group of shaders for a single stage to generate a linked shader
+ *
+ * \note
+ * If this function is supplied a single shader, it is cloned, and the new
+ * shader is returned.
+ */
+static struct gl_shader *
+link_intrastage_shaders(void *mem_ctx,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ /* Check that global variables defined in multiple shaders are consistent.
+ */
+ if (!cross_validate_globals(prog, shader_list, num_shaders, false))
+ return NULL;
+
+ /* Check that there is only a single definition of each function signature
+ * across all shaders.
+ */
+ for (unsigned i = 0; i < (num_shaders - 1); i++) {
+ foreach_list(node, shader_list[i]->ir) {
+ ir_function *const f = ((ir_instruction *) node)->as_function();
+
+ if (f == NULL)
+ continue;
+
+ for (unsigned j = i + 1; j < num_shaders; j++) {
+ ir_function *const other =
+ shader_list[j]->symbols->get_function(f->name);
+
+ /* If the other shader has no function (and therefore no function
+ * signatures) with the same name, skip to the next shader.
+ */
+ if (other == NULL)
+ continue;
+
+ foreach_iter (exec_list_iterator, iter, *f) {
+ ir_function_signature *sig =
+ (ir_function_signature *) iter.get();
+
+ if (!sig->is_defined || sig->is_builtin)
+ continue;
+
+ ir_function_signature *other_sig =
+ other->exact_matching_signature(& sig->parameters);
+
+ if ((other_sig != NULL) && other_sig->is_defined
+ && !other_sig->is_builtin) {
+ linker_error_printf(prog,
+ "function `%s' is multiply defined",
+ f->name);
+ return NULL;
+ }
+ }
+ }
+ }
+ }
+
+ /* Find the shader that defines main, and make a clone of it.
+ *
+ * Starting with the clone, search for undefined references. If one is
+ * found, find the shader that defines it. Clone the reference and add
+ * it to the shader. Repeat until there are no undefined references or
+ * until a reference cannot be resolved.
+ */
+ gl_shader *main = NULL;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (get_main_function_signature(shader_list[i]) != NULL) {
+ main = shader_list[i];
+ break;
+ }
+ }
+
+ if (main == NULL) {
+ linker_error_printf(prog, "%s shader lacks `main'\n",
+ (shader_list[0]->Type == GL_VERTEX_SHADER)
+ ? "vertex" : "fragment");
+ return NULL;
+ }
+
+ gl_shader *linked = ctx->Driver.NewShader(NULL, 0, main->Type);
+ linked->ir = new(linked) exec_list;
+ clone_ir_list(mem_ctx, linked->ir, main->ir);
+
+ populate_symbol_table(linked);
+
+   /* Get a pointer to the main function in the final linked shader (i.e., the
+ * copy of the original shader that contained the main function).
+ */
+ ir_function_signature *const main_sig = get_main_function_signature(linked);
+
+ /* Move any instructions other than variable declarations or function
+ * declarations into main.
+ */
+ exec_node *insertion_point =
+ move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
+ linked);
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == main)
+ continue;
+
+ insertion_point = move_non_declarations(shader_list[i]->ir,
+ insertion_point, true, linked);
+ }
+
+ /* Resolve initializers for global variables in the linked shader.
+ */
+ unsigned num_linking_shaders = num_shaders;
+ for (unsigned i = 0; i < num_shaders; i++)
+ num_linking_shaders += shader_list[i]->num_builtins_to_link;
+
+ gl_shader **linking_shaders =
+ (gl_shader **) calloc(num_linking_shaders, sizeof(gl_shader *));
+
+ memcpy(linking_shaders, shader_list,
+ sizeof(linking_shaders[0]) * num_shaders);
+
+ unsigned idx = num_shaders;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ memcpy(&linking_shaders[idx], shader_list[i]->builtins_to_link,
+ sizeof(linking_shaders[0]) * shader_list[i]->num_builtins_to_link);
+ idx += shader_list[i]->num_builtins_to_link;
+ }
+
+ assert(idx == num_linking_shaders);
+
+ if (!link_function_calls(prog, linked, linking_shaders,
+ num_linking_shaders)) {
+ ctx->Driver.DeleteShader(ctx, linked);
+ linked = NULL;
+ }
+
+ free(linking_shaders);
+
+ /* Make a pass over all variable declarations to ensure that arrays with
+ * unspecified sizes have a size specified. The size is inferred from the
+ * max_array_access field.
+ */
+ if (linked != NULL) {
+ class array_sizing_visitor : public ir_hierarchical_visitor {
+ public:
+ virtual ir_visitor_status visit(ir_variable *var)
+ {
+ if (var->type->is_array() && (var->type->length == 0)) {
+ const glsl_type *type =
+ glsl_type::get_array_instance(var->type->fields.array,
+ var->max_array_access + 1);
+
+ assert(type != NULL);
+ var->type = type;
+ }
+
+ return visit_continue;
+ }
+ } v;
+
+ v.run(linked->ir);
+ }
+
+ return linked;
+}
+
+
+struct uniform_node {
+ exec_node link;
+ struct gl_uniform *u;
+ unsigned slots;
+};
+
+/**
+ * Update the sizes of linked shader uniform arrays to the maximum
+ * array index used.
+ *
+ * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
+ *
+ * If one or more elements of an array are active,
+ * GetActiveUniform will return the name of the array in name,
+ * subject to the restrictions listed above. The type of the array
+ * is returned in type. The size parameter contains the highest
+ * array element index used, plus one. The compiler or linker
+ * determines the highest index used. There will be only one
+ * active uniform reported by the GL per uniform array.
+
+ */
+static void
+update_array_sizes(struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ foreach_list(node, prog->_LinkedShaders[i]->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != ir_var_uniform &&
+ var->mode != ir_var_in &&
+ var->mode != ir_var_out) ||
+ !var->type->is_array())
+ continue;
+
+ unsigned int size = var->max_array_access;
+ for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
+ if (prog->_LinkedShaders[j] == NULL)
+ continue;
+
+ foreach_list(node2, prog->_LinkedShaders[j]->ir) {
+ ir_variable *other_var = ((ir_instruction *) node2)->as_variable();
+ if (!other_var)
+ continue;
+
+ if (strcmp(var->name, other_var->name) == 0 &&
+ other_var->max_array_access > size) {
+ size = other_var->max_array_access;
+ }
+ }
+ }
+
+ if (size + 1 != var->type->fields.array->length) {
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ size + 1);
+ /* FINISHME: We should update the types of array
+ * dereferences of this variable now.
+ */
+ }
+ }
+ }
+}
+
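+/**
+ * Add a single uniform, or recursively its members, to the uniform list
+ *
+ * Structures are flattened into their individual fields ("name.field"), and
+ * arrays of structures are flattened into individually named elements, so a
+ * uniform declared as, say, \c struct \c { \c vec4 \c v; \c } \c s[2] yields
+ * entries "s[0].v" and "s[1].v".  For everything else a \c uniform_node is
+ * found or created in the hash table, its vec4 slot count is recorded, and
+ * the uniform's position in the current shader stage (vertex, fragment, or
+ * geometry) is stored.
+ */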
+static void
+add_uniform(void *mem_ctx, exec_list *uniforms, struct hash_table *ht,
+ const char *name, const glsl_type *type, GLenum shader_type,
+ unsigned *next_shader_pos, unsigned *total_uniforms)
+{
+ if (type->is_record()) {
+ for (unsigned int i = 0; i < type->length; i++) {
+ const glsl_type *field_type = type->fields.structure[i].type;
+ char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name,
+ type->fields.structure[i].name);
+
+ add_uniform(mem_ctx, uniforms, ht, field_name, field_type,
+ shader_type, next_shader_pos, total_uniforms);
+ }
+ } else {
+ uniform_node *n = (uniform_node *) hash_table_find(ht, name);
+ unsigned int vec4_slots;
+ const glsl_type *array_elem_type = NULL;
+
+ if (type->is_array()) {
+ array_elem_type = type->fields.array;
+ /* Array of structures. */
+ if (array_elem_type->is_record()) {
+ for (unsigned int i = 0; i < type->length; i++) {
+ char *elem_name = ralloc_asprintf(mem_ctx, "%s[%d]", name, i);
+ add_uniform(mem_ctx, uniforms, ht, elem_name, array_elem_type,
+ shader_type, next_shader_pos, total_uniforms);
+ }
+ return;
+ }
+ }
+
+ /* Fix the storage size of samplers at 1 vec4 each. Be sure to pad out
+ * vectors to vec4 slots.
+ */
+ if (type->is_array()) {
+ if (array_elem_type->is_sampler())
+ vec4_slots = type->length;
+ else
+ vec4_slots = type->length * array_elem_type->matrix_columns;
+ } else if (type->is_sampler()) {
+ vec4_slots = 1;
+ } else {
+ vec4_slots = type->matrix_columns;
+ }
+
+ if (n == NULL) {
+ n = (uniform_node *) calloc(1, sizeof(struct uniform_node));
+ n->u = (gl_uniform *) calloc(1, sizeof(struct gl_uniform));
+ n->slots = vec4_slots;
+
+ n->u->Name = strdup(name);
+ n->u->Type = type;
+ n->u->VertPos = -1;
+ n->u->FragPos = -1;
+ n->u->GeomPos = -1;
+ (*total_uniforms)++;
+
+ hash_table_insert(ht, n, name);
+ uniforms->push_tail(& n->link);
+ }
+
+ switch (shader_type) {
+ case GL_VERTEX_SHADER:
+ n->u->VertPos = *next_shader_pos;
+ break;
+ case GL_FRAGMENT_SHADER:
+ n->u->FragPos = *next_shader_pos;
+ break;
+ case GL_GEOMETRY_SHADER:
+ n->u->GeomPos = *next_shader_pos;
+ break;
+ }
+
+ (*next_shader_pos) += vec4_slots;
+ }
+}
+
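+/**
+ * Assign locations to all uniforms in a linked program
+ *
+ * Walks every linked shader stage, assigns each non-built-in uniform a
+ * location, and builds the \c gl_uniform_list stored in \c prog->Uniforms.
+ */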
+void
+assign_uniform_locations(struct gl_shader_program *prog)
+{
+ /* */
+ exec_list uniforms;
+ unsigned total_uniforms = 0;
+ hash_table *ht = hash_table_ctor(32, hash_table_string_hash,
+ hash_table_string_compare);
+ void *mem_ctx = ralloc_context(NULL);
+
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ unsigned next_position = 0;
+
+ foreach_list(node, prog->_LinkedShaders[i]->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != ir_var_uniform))
+ continue;
+
+ if (strncmp(var->name, "gl_", 3) == 0) {
+ /* At the moment, we don't allocate uniform locations for
+ * builtin uniforms. It's permitted by spec, and we'll
+ * likely switch to doing that at some point, but not yet.
+ */
+ continue;
+ }
+
+ var->location = next_position;
+ add_uniform(mem_ctx, &uniforms, ht, var->name, var->type,
+ prog->_LinkedShaders[i]->Type,
+ &next_position, &total_uniforms);
+ }
+ }
+
+ ralloc_free(mem_ctx);
+
+ gl_uniform_list *ul = (gl_uniform_list *)
+ calloc(1, sizeof(gl_uniform_list));
+
+ ul->Size = total_uniforms;
+ ul->NumUniforms = total_uniforms;
+ ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform));
+
+ unsigned idx = 0;
+ uniform_node *next;
+ for (uniform_node *node = (uniform_node *) uniforms.head
+ ; node->link.next != NULL
+ ; node = next) {
+ next = (uniform_node *) node->link.next;
+
+ node->link.remove();
+ memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform));
+ idx++;
+
+ free(node->u);
+ free(node);
+ }
+
+ hash_table_dtor(ht);
+
+ prog->Uniforms = ul;
+}
+
+
+/**
+ * Find a contiguous set of available bits in a bitmask
+ *
+ * \param used_mask Bits representing used (1) and unused (0) locations
+ * \param needed_count Number of contiguous bits needed.
+ *
+ * \return
+ * Base location of the available bits on success or -1 on failure.
+ */
+int
+find_available_slots(unsigned used_mask, unsigned needed_count)
+{
+ unsigned needed_mask = (1 << needed_count) - 1;
+ const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
+
+ /* The comparison to 32 is redundant, but without it GCC emits "warning:
+ * cannot optimize possibly infinite loops" for the loop below.
+ */
+ if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
+ return -1;
+
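+   /* Slide the needed bits up one position each iteration until they all
+    * land on unused locations.  Illustrative example: with used_mask = 0x7
+    * (locations 0..2 taken) and needed_count = 2, the first fit is found at
+    * i = 3, so 3 is returned and locations 3 and 4 can then be marked used.
+    */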
+ for (int i = 0; i <= max_bit_to_test; i++) {
+ if ((needed_mask & ~used_mask) == needed_mask)
+ return i;
+
+ needed_mask <<= 1;
+ }
+
+ return -1;
+}
+
+
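+/**
+ * Assign locations to vertex shader inputs (attributes)
+ *
+ * Honors application-specified bindings from \c prog->Attributes and
+ * explicit locations from the shader, then packs the remaining inputs into
+ * free generic attribute slots.
+ *
+ * \return
+ * \c true on success, or \c false if a link error was recorded.
+ */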
+bool
+assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index)
+{
+ /* Mark invalid attribute locations as being used.
+ */
+ unsigned used_locations = (max_attribute_index >= 32)
+ ? ~0 : ~((1 << max_attribute_index) - 1);
+
+ gl_shader *const sh = prog->_LinkedShaders[0];
+ assert(sh->Type == GL_VERTEX_SHADER);
+
+ /* Operate in a total of four passes.
+ *
+ * 1. Invalidate the location assignments for all vertex shader inputs.
+ *
+ * 2. Assign locations for inputs that have user-defined (via
+    *    glBindAttribLocation) locations.
+ *
+ * 3. Sort the attributes without assigned locations by number of slots
+ * required in decreasing order. Fragmentation caused by attribute
+ * locations assigned by the application may prevent large attributes
+ * from having enough contiguous space.
+ *
+ * 4. Assign locations to any inputs without assigned locations.
+ */
+
+ invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
+
+ if (prog->Attributes != NULL) {
+ for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
+ ir_variable *const var =
+ sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
+
+ /* Note: attributes that occupy multiple slots, such as arrays or
+ * matrices, may appear in the attrib array multiple times.
+ */
+ if ((var == NULL) || (var->location != -1))
+ continue;
+
+ /* From page 61 of the OpenGL 4.0 spec:
+ *
+ * "LinkProgram will fail if the attribute bindings assigned by
+ * BindAttribLocation do not leave not enough space to assign a
+ * location for an active matrix attribute or an active attribute
+ * array, both of which require multiple contiguous generic
+ * attributes."
+ *
+ * Previous versions of the spec contain similar language but omit the
+ * bit about attribute arrays.
+ *
+ * Page 61 of the OpenGL 4.0 spec also says:
+ *
+ * "It is possible for an application to bind more than one
+ * attribute name to the same location. This is referred to as
+ * aliasing. This will only work if only one of the aliased
+ * attributes is active in the executable program, or if no path
+ * through the shader consumes more than one attribute of a set
+ * of attributes aliased to the same location. A link error can
+ * occur if the linker determines that every path through the
+ * shader consumes multiple aliased attributes, but
+ * implementations are not required to generate an error in this
+ * case."
+ *
+ * These two paragraphs are either somewhat contradictory, or I don't
+ * fully understand one or both of them.
+ */
+ /* FINISHME: The code as currently written does not support attribute
+ * FINISHME: location aliasing (see comment above).
+ */
+ const int attr = prog->Attributes->Parameters[i].StateIndexes[0];
+ const unsigned slots = count_attribute_slots(var->type);
+
+ /* Mask representing the contiguous slots that will be used by this
+ * attribute.
+ */
+ const unsigned use_mask = (1 << slots) - 1;
+
+ /* Generate a link error if the set of bits requested for this
+ * attribute overlaps any previously allocated bits.
+ */
+ if ((~(use_mask << attr) & used_locations) != used_locations) {
+ linker_error_printf(prog,
+ "insufficient contiguous attribute locations "
+ "available for vertex shader input `%s'",
+ var->name);
+ return false;
+ }
+
+ var->location = VERT_ATTRIB_GENERIC0 + attr;
+ used_locations |= (use_mask << attr);
+ }
+ }
+
+ /* Temporary storage for the set of attributes that need locations assigned.
+ */
+ struct temp_attr {
+ unsigned slots;
+ ir_variable *var;
+
+ /* Used below in the call to qsort. */
+ static int compare(const void *a, const void *b)
+ {
+ const temp_attr *const l = (const temp_attr *) a;
+ const temp_attr *const r = (const temp_attr *) b;
+
+ /* Reversed because we want a descending order sort below. */
+ return r->slots - l->slots;
+ }
+ } to_assign[16];
+
+ unsigned num_attr = 0;
+
+ foreach_list(node, sh->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != ir_var_in))
+ continue;
+
+ if (var->explicit_location) {
+ const unsigned slots = count_attribute_slots(var->type);
+ const unsigned use_mask = (1 << slots) - 1;
+ const int attr = var->location - VERT_ATTRIB_GENERIC0;
+
+ if ((var->location >= (int)(max_attribute_index + VERT_ATTRIB_GENERIC0))
+ || (var->location < 0)) {
+ linker_error_printf(prog,
+ "invalid explicit location %d specified for "
+ "`%s'\n",
+ (var->location < 0) ? var->location : attr,
+ var->name);
+ return false;
+ } else if (var->location >= VERT_ATTRIB_GENERIC0) {
+ used_locations |= (use_mask << attr);
+ }
+ }
+
+ /* The location was explicitly assigned, nothing to do here.
+ */
+ if (var->location != -1)
+ continue;
+
+ to_assign[num_attr].slots = count_attribute_slots(var->type);
+ to_assign[num_attr].var = var;
+ num_attr++;
+ }
+
+ /* If all of the attributes were assigned locations by the application (or
+ * are built-in attributes with fixed locations), return early. This should
+ * be the common case.
+ */
+ if (num_attr == 0)
+ return true;
+
+ qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
+
+   /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS.  It can only
+    * be explicitly assigned via glBindAttribLocation.  Mark it as reserved
+ * to prevent it from being automatically allocated below.
+ */
+ find_deref_visitor find("gl_Vertex");
+ find.run(sh->ir);
+ if (find.variable_found())
+ used_locations |= (1 << 0);
+
+ for (unsigned i = 0; i < num_attr; i++) {
+ /* Mask representing the contiguous slots that will be used by this
+ * attribute.
+ */
+ const unsigned use_mask = (1 << to_assign[i].slots) - 1;
+
+ int location = find_available_slots(used_locations, to_assign[i].slots);
+
+ if (location < 0) {
+ linker_error_printf(prog,
+ "insufficient contiguous attribute locations "
+ "available for vertex shader input `%s'",
+ to_assign[i].var->name);
+ return false;
+ }
+
+ to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
+ used_locations |= (use_mask << location);
+ }
+
+ return true;
+}
+
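
The slot bookkeeping above packs attribute usage into a single bitmask: an attribute occupying a number of consecutive generic locations contributes the bits (1 << slots) - 1 shifted to its base location, and the find_available_slots() call above searches that mask for a free run. A minimal, standalone sketch of such a search follows; the helper name, the printf harness, and the 16-slot limit are assumptions made for illustration, not the actual Mesa implementation.

#include <stdio.h>

/* Sketch: return the first index at which needed_slots consecutive bits are
 * clear in used_mask, or -1 if no such run exists.  A 16-slot limit is
 * assumed here, matching the hardcoded value mentioned later in this diff. */
static int
sketch_find_available_slots(unsigned used_mask, unsigned needed_slots)
{
   const unsigned needed_mask = (1u << needed_slots) - 1;
   unsigned attr;

   for (attr = 0; attr + needed_slots <= 16; attr++) {
      if (((needed_mask << attr) & used_mask) == 0)
         return (int) attr;
   }
   return -1;
}

int main(void)
{
   /* slots 0 and 2 already taken; a mat4 input needs 4 contiguous slots */
   unsigned used = (1u << 0) | (1u << 2);
   printf("%d\n", sketch_find_available_slots(used, 4));   /* prints 3 */
   return 0;
}
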
+
+/**
+ * Demote shader inputs and outputs that are not used in other stages
+ */
+void
+demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode)
+{
+ foreach_list(node, sh->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != int(mode)))
+ continue;
+
+ /* A shader 'in' or 'out' variable is only really an input or output if
+ * its value is used by other shader stages. This will cause the variable
+ * to have a location assigned.
+ */
+ if (var->location == -1) {
+ var->mode = ir_var_auto;
+ }
+ }
+}
+
+
+void
+assign_varying_locations(struct gl_shader_program *prog,
+ gl_shader *producer, gl_shader *consumer)
+{
+ /* FINISHME: Set dynamically when geometry shader support is added. */
+ unsigned output_index = VERT_RESULT_VAR0;
+ unsigned input_index = FRAG_ATTRIB_VAR0;
+
+ /* Operate in a total of three passes.
+ *
+ * 1. Assign locations for any matching inputs and outputs.
+ *
+ * 2. Mark output variables in the producer that do not have locations as
+ * not being outputs. This lets the optimizer eliminate them.
+ *
+ * 3. Mark input variables in the consumer that do not have locations as
+ * not being inputs. This lets the optimizer eliminate them.
+ */
+
+ invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0);
+ invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0);
+
+ foreach_list(node, producer->ir) {
+ ir_variable *const output_var = ((ir_instruction *) node)->as_variable();
+
+ if ((output_var == NULL) || (output_var->mode != ir_var_out)
+ || (output_var->location != -1))
+ continue;
+
+ ir_variable *const input_var =
+ consumer->symbols->get_variable(output_var->name);
+
+ if ((input_var == NULL) || (input_var->mode != ir_var_in))
+ continue;
+
+ assert(input_var->location == -1);
+
+ output_var->location = output_index;
+ input_var->location = input_index;
+
+ /* FINISHME: Support for "varying" records in GLSL 1.50. */
+ assert(!output_var->type->is_record());
+
+ if (output_var->type->is_array()) {
+ const unsigned slots = output_var->type->length
+ * output_var->type->fields.array->matrix_columns;
+
+ output_index += slots;
+ input_index += slots;
+ } else {
+ const unsigned slots = output_var->type->matrix_columns;
+
+ output_index += slots;
+ input_index += slots;
+ }
+ }
+
+ foreach_list(node, consumer->ir) {
+ ir_variable *const var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != ir_var_in))
+ continue;
+
+ if (var->location == -1) {
+ if (prog->Version <= 120) {
+ /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
+ *
+ * Only those varying variables used (i.e. read) in
+ * the fragment shader executable must be written to
+ * by the vertex shader executable; declaring
+ * superfluous varying variables in a vertex shader is
+ * permissible.
+ *
+ * We interpret this text as meaning that the VS must
+ * write the variable for the FS to read it. See
+ * "glsl1-varying read but not written" in piglit.
+ */
+
+ linker_error_printf(prog, "fragment shader varying %s not written "
+ "by vertex shader\n.", var->name);
+ prog->LinkStatus = false;
+ }
+
+ /* An 'in' variable is only really a shader input if its
+ * value is written by the previous stage.
+ */
+ var->mode = ir_var_auto;
+ }
+ }
+}
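
The slot arithmetic in the loop above assigns one varying slot per matrix column, multiplied by the element count for arrays. A tiny hedged sketch makes the accounting explicit; the function and parameter names are invented for the example and do not use the real glsl_type interface.

#include <stdio.h>

/* Sketch of the slot count used above: matrix_columns per element,
 * scaled by the element count for arrays. */
static unsigned
sketch_varying_slots(int is_array, unsigned array_length, unsigned matrix_columns)
{
   return is_array ? array_length * matrix_columns : matrix_columns;
}

int main(void)
{
   printf("mat4     -> %u slots\n", sketch_varying_slots(0, 0, 4));   /* 4 */
   printf("float[3] -> %u slots\n", sketch_varying_slots(1, 3, 1));   /* 3 */
   return 0;
}
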
+
+
+void
+link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ void *mem_ctx = ralloc_context(NULL); // temporary linker context
+
+ prog->LinkStatus = false;
+ prog->Validated = false;
+ prog->_Used = false;
+
+ if (prog->InfoLog != NULL)
+ ralloc_free(prog->InfoLog);
+
+ prog->InfoLog = ralloc_strdup(NULL, "");
+
+ /* Separate the shaders into groups based on their type.
+ */
+ struct gl_shader **vert_shader_list;
+ unsigned num_vert_shaders = 0;
+ struct gl_shader **frag_shader_list;
+ unsigned num_frag_shaders = 0;
+
+ vert_shader_list = (struct gl_shader **)
+ calloc(2 * prog->NumShaders, sizeof(struct gl_shader *));
+ frag_shader_list = &vert_shader_list[prog->NumShaders];
+
+ unsigned min_version = UINT_MAX;
+ unsigned max_version = 0;
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ min_version = MIN2(min_version, prog->Shaders[i]->Version);
+ max_version = MAX2(max_version, prog->Shaders[i]->Version);
+
+ switch (prog->Shaders[i]->Type) {
+ case GL_VERTEX_SHADER:
+ vert_shader_list[num_vert_shaders] = prog->Shaders[i];
+ num_vert_shaders++;
+ break;
+ case GL_FRAGMENT_SHADER:
+ frag_shader_list[num_frag_shaders] = prog->Shaders[i];
+ num_frag_shaders++;
+ break;
+ case GL_GEOMETRY_SHADER:
+ /* FINISHME: Support geometry shaders. */
+ assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
+ break;
+ }
+ }
+
+ /* Prior to GLSL version 1.30, different compilation units could mix and
+ * match shading language versions. With GLSL 1.30 and later, the versions
+ * of all shaders must match.
+ */
+ assert(min_version >= 100);
+ assert(max_version <= 130);
+ if ((max_version >= 130 || min_version == 100)
+ && min_version != max_version) {
+ linker_error_printf(prog, "all shaders must use same shading "
+ "language version\n");
+ goto done;
+ }
+
+ prog->Version = max_version;
+
+ for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] != NULL)
+ ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]);
+
+ prog->_LinkedShaders[i] = NULL;
+ }
+
+ /* Link all shaders for a particular stage and validate the result.
+ */
+ if (num_vert_shaders > 0) {
+ gl_shader *const sh =
+ link_intrastage_shaders(mem_ctx, ctx, prog, vert_shader_list,
+ num_vert_shaders);
+
+ if (sh == NULL)
+ goto done;
+
+ if (!validate_vertex_shader_executable(prog, sh))
+ goto done;
+
+ _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_VERTEX],
+ sh);
+ }
+
+ if (num_frag_shaders > 0) {
+ gl_shader *const sh =
+ link_intrastage_shaders(mem_ctx, ctx, prog, frag_shader_list,
+ num_frag_shaders);
+
+ if (sh == NULL)
+ goto done;
+
+ if (!validate_fragment_shader_executable(prog, sh))
+ goto done;
+
+ _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
+ sh);
+ }
+
+ /* Here begins the inter-stage linking phase. Some initial validation is
+ * performed, then locations are assigned for uniforms, attributes, and
+ * varyings.
+ */
+ if (cross_validate_uniforms(prog)) {
+ unsigned prev;
+
+ for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
+ if (prog->_LinkedShaders[prev] != NULL)
+ break;
+ }
+
+ /* Validate the inputs of each stage against the output of the preceding
+ * stage.
+ */
+ for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ if (!cross_validate_outputs_to_inputs(prog,
+ prog->_LinkedShaders[prev],
+ prog->_LinkedShaders[i]))
+ goto done;
+
+ prev = i;
+ }
+
+ prog->LinkStatus = true;
+ }
+
+ /* Do common optimization before assigning storage for attributes,
+ * uniforms, and varyings. Later optimization could make
+ * some of that unused.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, 32))
+ ;
+ }
+
+ update_array_sizes(prog);
+
+ assign_uniform_locations(prog);
+
+ if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
+ /* FINISHME: The value of the max_attribute_index parameter is
+ * FINISHME: implementation dependent based on the value of
+ * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
+ * FINISHME: at least 16, so hardcode 16 for now.
+ */
+ if (!assign_attribute_locations(prog, 16)) {
+ prog->LinkStatus = false;
+ goto done;
+ }
+ }
+
+ unsigned prev;
+ for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
+ if (prog->_LinkedShaders[prev] != NULL)
+ break;
+ }
+
+ for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ assign_varying_locations(prog,
+ prog->_LinkedShaders[prev],
+ prog->_LinkedShaders[i]);
+ prev = i;
+ }
+
+ if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
+ demote_shader_inputs_and_outputs(prog->_LinkedShaders[MESA_SHADER_VERTEX],
+ ir_var_out);
+ }
+
+ if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) {
+ gl_shader *const sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
+
+ demote_shader_inputs_and_outputs(sh, ir_var_in);
+ demote_shader_inputs_and_outputs(sh, ir_var_inout);
+ demote_shader_inputs_and_outputs(sh, ir_var_out);
+ }
+
+ if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) {
+ gl_shader *const sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+
+ demote_shader_inputs_and_outputs(sh, ir_var_in);
+ }
+
+ /* OpenGL ES requires that a vertex shader and a fragment shader both be
+ * present in a linked program. By checking for use of shading language
+ * version 1.00, we also catch the GL_ARB_ES2_compatibility case.
+ */
+ if (ctx->API == API_OPENGLES2 || prog->Version == 100) {
+ if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
+ linker_error_printf(prog, "program lacks a vertex shader\n");
+ prog->LinkStatus = false;
+ } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
+ linker_error_printf(prog, "program lacks a fragment shader\n");
+ prog->LinkStatus = false;
+ }
+ }
+
+ /* FINISHME: Assign fragment shader output locations. */
+
+done:
+ free(vert_shader_list);
+
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ /* Retain any live IR, but trash the rest. */
+ reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
+ }
+
+ ralloc_free(mem_ctx);
+}
diff --git a/mesalib/src/mesa/main/mtypes.h b/mesalib/src/mesa/main/mtypes.h index 020595bd0..714d63afc 100644 --- a/mesalib/src/mesa/main/mtypes.h +++ b/mesalib/src/mesa/main/mtypes.h @@ -1240,7 +1240,7 @@ struct gl_texture_image * GL_DEPTH_COMPONENT or GL_DEPTH_STENCIL_EXT
* only. Used for choosing TexEnv arithmetic.
*/
- GLuint TexFormat; /**< The actual format: MESA_FORMAT_x */
+ gl_format TexFormat; /**< The actual texture memory format */
GLuint Border; /**< 0 or 1 */
GLuint Width; /**< = 2^WidthLog2 + 2*Border */
@@ -1643,6 +1643,7 @@ struct gl_array_attrib GLuint RestartIndex;
GLbitfield NewState; /**< mask of _NEW_ARRAY_* values */
+ GLboolean RebindArrays; /**< whether the VBO module should rebind arrays */
/* GL_ARB_vertex_buffer_object */
struct gl_buffer_object *ArrayBufferObj;
@@ -2379,7 +2380,7 @@ struct gl_renderbuffer GLenum InternalFormat; /**< The user-specified format */
GLenum _BaseFormat; /**< Either GL_RGB, GL_RGBA, GL_DEPTH_COMPONENT or
GL_STENCIL_INDEX. */
- GLuint Format; /**< The actual format: MESA_FORMAT_x */
+ gl_format Format; /**< The actual renderbuffer memory format */
GLubyte NumSamples;
diff --git a/mesalib/src/mesa/main/state.c b/mesalib/src/mesa/main/state.c index c07e2c380..f11578faf 100644 --- a/mesalib/src/mesa/main/state.c +++ b/mesalib/src/mesa/main/state.c @@ -662,6 +662,8 @@ _mesa_update_state_locked( struct gl_context *ctx ) ctx->NewState = 0;
ctx->Driver.UpdateState(ctx, new_state);
ctx->Array.NewState = 0;
+ if (!ctx->Array.RebindArrays)
+ ctx->Array.RebindArrays = (new_state & (_NEW_ARRAY | _NEW_PROGRAM)) != 0;
}
diff --git a/mesalib/src/mesa/state_tracker/st_atom_blend.c b/mesalib/src/mesa/state_tracker/st_atom_blend.c index 26bb3dab9..e3b3b9936 100644 --- a/mesalib/src/mesa/state_tracker/st_atom_blend.c +++ b/mesalib/src/mesa/state_tracker/st_atom_blend.c @@ -191,7 +191,7 @@ update_blend( struct st_context *st ) {
struct pipe_blend_state *blend = &st->state.blend;
unsigned num_state = 1;
- unsigned i;
+ unsigned i, j;
memset(blend, 0, sizeof(*blend));
@@ -214,12 +214,15 @@ update_blend( struct st_context *st ) }
else if (st->ctx->Color.BlendEnabled) {
/* blending enabled */
- for (i = 0; i < num_state; i++) {
+ for (i = 0, j = 0; i < num_state; i++) {
blend->rt[i].blend_enable = (st->ctx->Color.BlendEnabled >> i) & 0x1;
+ if (st->ctx->Extensions.ARB_draw_buffers_blend)
+ j = i;
+
blend->rt[i].rgb_func =
- translate_blend(st->ctx->Color.Blend[i].EquationRGB);
+ translate_blend(st->ctx->Color.Blend[j].EquationRGB);
if (st->ctx->Color.Blend[i].EquationRGB == GL_MIN ||
st->ctx->Color.Blend[i].EquationRGB == GL_MAX) {
@@ -229,13 +232,13 @@ update_blend( struct st_context *st ) }
else {
blend->rt[i].rgb_src_factor =
- translate_blend(st->ctx->Color.Blend[i].SrcRGB);
+ translate_blend(st->ctx->Color.Blend[j].SrcRGB);
blend->rt[i].rgb_dst_factor =
- translate_blend(st->ctx->Color.Blend[i].DstRGB);
+ translate_blend(st->ctx->Color.Blend[j].DstRGB);
}
blend->rt[i].alpha_func =
- translate_blend(st->ctx->Color.Blend[i].EquationA);
+ translate_blend(st->ctx->Color.Blend[j].EquationA);
if (st->ctx->Color.Blend[i].EquationA == GL_MIN ||
st->ctx->Color.Blend[i].EquationA == GL_MAX) {
@@ -245,9 +248,9 @@ update_blend( struct st_context *st ) }
else {
blend->rt[i].alpha_src_factor =
- translate_blend(st->ctx->Color.Blend[i].SrcA);
+ translate_blend(st->ctx->Color.Blend[j].SrcA);
blend->rt[i].alpha_dst_factor =
- translate_blend(st->ctx->Color.Blend[i].DstA);
+ translate_blend(st->ctx->Color.Blend[j].DstA);
}
}
}
diff --git a/mesalib/src/mesa/state_tracker/st_cb_bitmap.c b/mesalib/src/mesa/state_tracker/st_cb_bitmap.c index ddd130a81..51623c179 100644 --- a/mesalib/src/mesa/state_tracker/st_cb_bitmap.c +++ b/mesalib/src/mesa/state_tracker/st_cb_bitmap.c @@ -349,6 +349,7 @@ setup_bitmap_vertex_data(struct st_context *st, bool normalized, if (!st->bitmap.vbuf) {
st->bitmap.vbuf = pipe_buffer_create(pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STREAM,
max_slots *
sizeof(st->bitmap.vertices));
}
@@ -456,6 +457,7 @@ draw_bitmap_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z, cso_save_fragment_shader(cso);
cso_save_vertex_shader(cso);
cso_save_vertex_elements(cso);
+ cso_save_vertex_buffers(cso);
/* rasterizer state: just scissor */
st->bitmap.rasterizer.scissor = ctx->Scissor.Enabled;
@@ -517,7 +519,7 @@ draw_bitmap_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z, sv->texture->target != PIPE_TEXTURE_RECT,
x, y, width, height, z, color);
- util_draw_vertex_buffer(pipe, st->bitmap.vbuf, offset,
+ util_draw_vertex_buffer(pipe, st->cso_context, st->bitmap.vbuf, offset,
PIPE_PRIM_TRIANGLE_FAN,
4, /* verts */
3); /* attribs/vert */
@@ -531,6 +533,7 @@ draw_bitmap_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z, cso_restore_fragment_shader(cso);
cso_restore_vertex_shader(cso);
cso_restore_vertex_elements(cso);
+ cso_restore_vertex_buffers(cso);
}
diff --git a/mesalib/src/mesa/state_tracker/st_cb_bufferobjects.c b/mesalib/src/mesa/state_tracker/st_cb_bufferobjects.c index ba8a8cf89..9dd1f8a3e 100644 --- a/mesalib/src/mesa/state_tracker/st_cb_bufferobjects.c +++ b/mesalib/src/mesa/state_tracker/st_cb_bufferobjects.c @@ -171,7 +171,7 @@ st_bufferobj_data(struct gl_context *ctx, struct st_context *st = st_context(ctx);
struct pipe_context *pipe = st->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
- unsigned buffer_usage;
+ unsigned bind, pipe_usage;
st_obj->Base.Size = size;
st_obj->Base.Usage = usage;
@@ -179,22 +179,43 @@ st_bufferobj_data(struct gl_context *ctx, switch(target) {
case GL_PIXEL_PACK_BUFFER_ARB:
case GL_PIXEL_UNPACK_BUFFER_ARB:
- buffer_usage = PIPE_BIND_RENDER_TARGET;
+ bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
break;
case GL_ARRAY_BUFFER_ARB:
- buffer_usage = PIPE_BIND_VERTEX_BUFFER;
+ bind = PIPE_BIND_VERTEX_BUFFER;
break;
case GL_ELEMENT_ARRAY_BUFFER_ARB:
- buffer_usage = PIPE_BIND_INDEX_BUFFER;
+ bind = PIPE_BIND_INDEX_BUFFER;
break;
default:
- buffer_usage = 0;
+ bind = 0;
+ }
+
+ switch (usage) {
+ case GL_STATIC_DRAW:
+ case GL_STATIC_READ:
+ case GL_STATIC_COPY:
+ pipe_usage = PIPE_USAGE_STATIC;
+ break;
+ case GL_DYNAMIC_DRAW:
+ case GL_DYNAMIC_READ:
+ case GL_DYNAMIC_COPY:
+ pipe_usage = PIPE_USAGE_DYNAMIC;
+ break;
+ case GL_STREAM_DRAW:
+ case GL_STREAM_READ:
+ case GL_STREAM_COPY:
+ pipe_usage = PIPE_USAGE_STREAM;
+ break;
+ default:
+ pipe_usage = PIPE_USAGE_DEFAULT;
}
pipe_resource_reference( &st_obj->buffer, NULL );
if (size != 0) {
- st_obj->buffer = pipe_buffer_create(pipe->screen, buffer_usage, size);
+ st_obj->buffer = pipe_buffer_create(pipe->screen, bind,
+ pipe_usage, size);
if (!st_obj->buffer) {
return GL_FALSE;
diff --git a/mesalib/src/mesa/state_tracker/st_cb_clear.c b/mesalib/src/mesa/state_tracker/st_cb_clear.c index 3e27be271..461f4e315 100644 --- a/mesalib/src/mesa/state_tracker/st_cb_clear.c +++ b/mesalib/src/mesa/state_tracker/st_cb_clear.c @@ -138,6 +138,7 @@ draw_quad(struct st_context *st, if (!st->clear.vbuf) {
st->clear.vbuf = pipe_buffer_create(pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STREAM,
max_slots * sizeof(st->clear.vertices));
}
@@ -172,7 +173,8 @@ draw_quad(struct st_context *st, st->clear.vertices);
/* draw */
- util_draw_vertex_buffer(pipe,
+ util_draw_vertex_buffer(pipe,
+ st->cso_context,
st->clear.vbuf,
st->clear.vbuf_slot * sizeof(st->clear.vertices),
PIPE_PRIM_TRIANGLE_FAN,
@@ -221,6 +223,7 @@ clear_with_quad(struct gl_context *ctx, cso_save_fragment_shader(st->cso_context);
cso_save_vertex_shader(st->cso_context);
cso_save_vertex_elements(st->cso_context);
+ cso_save_vertex_buffers(st->cso_context);
/* blend state: RGBA masking */
{
@@ -309,6 +312,7 @@ clear_with_quad(struct gl_context *ctx, cso_restore_fragment_shader(st->cso_context);
cso_restore_vertex_shader(st->cso_context);
cso_restore_vertex_elements(st->cso_context);
+ cso_restore_vertex_buffers(st->cso_context);
}
diff --git a/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c b/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c index 56c7e8581..18ac6c8e0 100644 --- a/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c +++ b/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c @@ -522,10 +522,11 @@ draw_quad(struct gl_context *ctx, GLfloat x0, GLfloat y0, GLfloat z, /* allocate/load buffer object with vertex data */
buf = pipe_buffer_create(pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STATIC,
sizeof(verts));
pipe_buffer_write(st->pipe, buf, 0, sizeof(verts), verts);
- util_draw_vertex_buffer(pipe, buf, 0,
+ util_draw_vertex_buffer(pipe, st->cso_context, buf, 0,
PIPE_PRIM_QUADS,
4, /* verts */
3); /* attribs/vert */
@@ -570,6 +571,7 @@ draw_textured_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z, cso_save_fragment_shader(cso);
cso_save_vertex_shader(cso);
cso_save_vertex_elements(cso);
+ cso_save_vertex_buffers(cso);
if (write_stencil) {
cso_save_depth_stencil_alpha(cso);
cso_save_blend(cso);
@@ -686,6 +688,7 @@ draw_textured_quad(struct gl_context *ctx, GLint x, GLint y, GLfloat z, cso_restore_fragment_shader(cso);
cso_restore_vertex_shader(cso);
cso_restore_vertex_elements(cso);
+ cso_restore_vertex_buffers(cso);
if (write_stencil) {
cso_restore_depth_stencil_alpha(cso);
cso_restore_blend(cso);
diff --git a/mesalib/src/mesa/state_tracker/st_cb_drawtex.c b/mesalib/src/mesa/state_tracker/st_cb_drawtex.c index 5976f1048..db299eb8d 100644 --- a/mesalib/src/mesa/state_tracker/st_cb_drawtex.c +++ b/mesalib/src/mesa/state_tracker/st_cb_drawtex.c @@ -139,6 +139,7 @@ st_DrawTex(struct gl_context *ctx, GLfloat x, GLfloat y, GLfloat z, /* create the vertex buffer */
vbuffer = pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STREAM,
numAttribs * 4 * 4 * sizeof(GLfloat));
/* load vertex buffer */
@@ -230,6 +231,7 @@ st_DrawTex(struct gl_context *ctx, GLfloat x, GLfloat y, GLfloat z, cso_save_viewport(cso);
cso_save_vertex_shader(cso);
cso_save_vertex_elements(cso);
+ cso_save_vertex_buffers(cso);
{
void *vs = lookup_shader(pipe, numAttribs,
@@ -264,7 +266,7 @@ st_DrawTex(struct gl_context *ctx, GLfloat x, GLfloat y, GLfloat z, }
- util_draw_vertex_buffer(pipe, vbuffer,
+ util_draw_vertex_buffer(pipe, cso, vbuffer,
0, /* offset */
PIPE_PRIM_TRIANGLE_FAN,
4, /* verts */
@@ -277,6 +279,7 @@ st_DrawTex(struct gl_context *ctx, GLfloat x, GLfloat y, GLfloat z, cso_restore_viewport(cso);
cso_restore_vertex_shader(cso);
cso_restore_vertex_elements(cso);
+ cso_restore_vertex_buffers(cso);
}
diff --git a/mesalib/src/mesa/state_tracker/st_context.c b/mesalib/src/mesa/state_tracker/st_context.c index b16d965f8..8f3a7415d 100644 --- a/mesalib/src/mesa/state_tracker/st_context.c +++ b/mesalib/src/mesa/state_tracker/st_context.c @@ -203,6 +203,11 @@ static void st_destroy_context_priv( struct st_context *st ) st_destroy_drawpix(st);
st_destroy_drawtex(st);
+ /* Unreference any user vertex buffers. */
+ for (i = 0; i < st->num_user_vbs; i++) {
+ pipe_resource_reference(&st->user_vb[i], NULL);
+ }
+
for (i = 0; i < Elements(st->state.sampler_views); i++) {
pipe_sampler_view_reference(&st->state.sampler_views[i], NULL);
}
diff --git a/mesalib/src/mesa/state_tracker/st_context.h b/mesalib/src/mesa/state_tracker/st_context.h index 492ee600e..ef54fd7cd 100644 --- a/mesalib/src/mesa/state_tracker/st_context.h +++ b/mesalib/src/mesa/state_tracker/st_context.h @@ -185,6 +185,11 @@ struct st_context int force_msaa;
void *winsys_drawable_handle;
+
+ /* User vertex buffers. */
+ struct pipe_resource *user_vb[PIPE_MAX_ATTRIBS];
+ unsigned user_vb_stride[PIPE_MAX_ATTRIBS];
+ unsigned num_user_vbs;
};
diff --git a/mesalib/src/mesa/state_tracker/st_draw.c b/mesalib/src/mesa/state_tracker/st_draw.c index 19466ea44..a31cbdc9e 100644 --- a/mesalib/src/mesa/state_tracker/st_draw.c +++ b/mesalib/src/mesa/state_tracker/st_draw.c @@ -243,13 +243,11 @@ st_pipe_vertex_format(GLenum type, GLuint size, GLenum format, static GLboolean
is_interleaved_arrays(const struct st_vertex_program *vp,
const struct st_vp_variant *vpv,
- const struct gl_client_array **arrays,
- GLboolean *userSpace)
+ const struct gl_client_array **arrays)
{
GLuint attr;
const struct gl_buffer_object *firstBufObj = NULL;
GLint firstStride = -1;
- GLuint num_client_arrays = 0;
const GLubyte *client_addr = NULL;
for (attr = 0; attr < vpv->num_inputs; attr++) {
@@ -263,9 +261,8 @@ is_interleaved_arrays(const struct st_vertex_program *vp, else if (firstStride != stride) {
return GL_FALSE;
}
-
+
if (!bufObj || !bufObj->Name) {
- num_client_arrays++;
/* Try to detect if the client-space arrays are
* "close" to each other.
*/
@@ -285,57 +282,11 @@ is_interleaved_arrays(const struct st_vertex_program *vp, }
}
- *userSpace = (num_client_arrays == vpv->num_inputs);
- /* debug_printf("user space: %s (%d arrays, %d inputs)\n",
- (int)*userSpace ? "Yes" : "No", num_client_arrays, vp->num_inputs); */
-
return GL_TRUE;
}
/**
- * Compute the memory range occupied by the arrays.
- */
-static void
-get_arrays_bounds(const struct st_vertex_program *vp,
- const struct st_vp_variant *vpv,
- const struct gl_client_array **arrays,
- GLuint max_index,
- const GLubyte **low, const GLubyte **high)
-{
- const GLubyte *low_addr = NULL;
- const GLubyte *high_addr = NULL;
- GLuint attr;
-
- /* debug_printf("get_arrays_bounds: Handling %u attrs\n", vpv->num_inputs); */
-
- for (attr = 0; attr < vpv->num_inputs; attr++) {
- const GLuint mesaAttr = vp->index_to_input[attr];
- const GLint stride = arrays[mesaAttr]->StrideB;
- const GLubyte *start = arrays[mesaAttr]->Ptr;
- const unsigned sz = (arrays[mesaAttr]->Size *
- _mesa_sizeof_type(arrays[mesaAttr]->Type));
- const GLubyte *end = start + (max_index * stride) + sz;
-
- /* debug_printf("attr %u: stride %d size %u start %p end %p\n",
- attr, stride, sz, start, end); */
-
- if (attr == 0) {
- low_addr = start;
- high_addr = end;
- }
- else {
- low_addr = MIN2(low_addr, start);
- high_addr = MAX2(high_addr, end);
- }
- }
-
- *low = low_addr;
- *high = high_addr;
-}
-
-
-/**
* Set up for drawing interleaved arrays that all live in one VBO
* or all live in user space.
* \param vbuffer returns vertex buffer info
@@ -346,15 +297,21 @@ setup_interleaved_attribs(struct gl_context *ctx, const struct st_vertex_program *vp,
const struct st_vp_variant *vpv,
const struct gl_client_array **arrays,
- GLuint max_index,
- GLboolean userSpace,
struct pipe_vertex_buffer *vbuffer,
- struct pipe_vertex_element velements[])
+ struct pipe_vertex_element velements[],
+ unsigned max_index)
{
struct st_context *st = st_context(ctx);
struct pipe_context *pipe = st->pipe;
GLuint attr;
- const GLubyte *offset0 = NULL;
+ const GLubyte *low_addr = NULL;
+
+ /* Find the lowest address. */
+ for (attr = 0; attr < vpv->num_inputs; attr++) {
+ const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr;
+
+ low_addr = !low_addr ? start : MIN2(low_addr, start);
+ }
for (attr = 0; attr < vpv->num_inputs; attr++) {
const GLuint mesaAttr = vp->index_to_input[attr];
@@ -362,40 +319,28 @@ setup_interleaved_attribs(struct gl_context *ctx, struct st_buffer_object *stobj = st_buffer_object(bufobj);
GLsizei stride = arrays[mesaAttr]->StrideB;
- /*printf("stobj %u = %p\n", attr, (void*)stobj);*/
-
if (attr == 0) {
- const GLubyte *low, *high;
-
- get_arrays_bounds(vp, vpv, arrays, max_index, &low, &high);
- /* debug_printf("buffer range: %p %p range %d max index %u\n",
- low, high, high - low, max_index); */
-
- offset0 = low;
- if (userSpace) {
+ if (bufobj && bufobj->Name) {
+ vbuffer->buffer = NULL;
+ pipe_resource_reference(&vbuffer->buffer, stobj->buffer);
+ vbuffer->buffer_offset = pointer_to_offset(low_addr);
+ } else {
vbuffer->buffer =
- pipe_user_buffer_create(pipe->screen, (void *) low, high - low,
+ pipe_user_buffer_create(pipe->screen, (void*)low_addr,
+ stride * (max_index + 1),
PIPE_BIND_VERTEX_BUFFER);
vbuffer->buffer_offset = 0;
- }
- else {
- vbuffer->buffer = NULL;
- pipe_resource_reference(&vbuffer->buffer, stobj->buffer);
- vbuffer->buffer_offset = pointer_to_offset(low);
+
+ /* Track user vertex buffers. */
+ pipe_resource_reference(&st->user_vb[0], vbuffer->buffer);
+ st->user_vb_stride[0] = stride;
+ st->num_user_vbs = 1;
}
vbuffer->stride = stride; /* in bytes */
- vbuffer->max_index = max_index;
}
- /*
- if (arrays[mesaAttr]->InstanceDivisor)
- vbuffer[attr].max_index = arrays[mesaAttr]->_MaxElement;
- else
- vbuffer[attr].max_index = max_index;
- */
-
velements[attr].src_offset =
- (unsigned) (arrays[mesaAttr]->Ptr - offset0);
+ (unsigned) (arrays[mesaAttr]->Ptr - low_addr);
velements[attr].instance_divisor = arrays[mesaAttr]->InstanceDivisor;
velements[attr].vertex_buffer_index = 0;
velements[attr].src_format =
@@ -419,10 +364,9 @@ setup_non_interleaved_attribs(struct gl_context *ctx, const struct st_vertex_program *vp,
const struct st_vp_variant *vpv,
const struct gl_client_array **arrays,
- GLuint max_index,
- GLboolean *userSpace,
struct pipe_vertex_buffer vbuffer[],
- struct pipe_vertex_element velements[])
+ struct pipe_vertex_element velements[],
+ unsigned max_index)
{
struct st_context *st = st_context(ctx);
struct pipe_context *pipe = st->pipe;
@@ -433,8 +377,6 @@ setup_non_interleaved_attribs(struct gl_context *ctx, struct gl_buffer_object *bufobj = arrays[mesaAttr]->BufferObj;
GLsizei stride = arrays[mesaAttr]->StrideB;
- *userSpace = GL_FALSE;
-
if (bufobj && bufobj->Name) {
/* Attribute data is in a VBO.
* Recall that for VBOs, the gl_client_array->Ptr field is
@@ -442,37 +384,23 @@ setup_non_interleaved_attribs(struct gl_context *ctx, */
struct st_buffer_object *stobj = st_buffer_object(bufobj);
assert(stobj->buffer);
- /*printf("stobj %u = %p\n", attr, (void*) stobj);*/
vbuffer[attr].buffer = NULL;
pipe_resource_reference(&vbuffer[attr].buffer, stobj->buffer);
vbuffer[attr].buffer_offset = pointer_to_offset(arrays[mesaAttr]->Ptr);
}
else {
- /* attribute data is in user-space memory, not a VBO */
- uint bytes;
- /*printf("user-space array %d stride %d\n", attr, stride);*/
-
- *userSpace = GL_TRUE;
-
/* wrap user data */
if (arrays[mesaAttr]->Ptr) {
- /* user's vertex array */
- if (arrays[mesaAttr]->StrideB) {
- bytes = arrays[mesaAttr]->StrideB * (max_index + 1);
- }
- else {
- bytes = arrays[mesaAttr]->Size
- * _mesa_sizeof_type(arrays[mesaAttr]->Type);
- }
vbuffer[attr].buffer =
pipe_user_buffer_create(pipe->screen,
- (void *) arrays[mesaAttr]->Ptr, bytes,
+ (void *) arrays[mesaAttr]->Ptr,
+ stride * (max_index + 1),
PIPE_BIND_VERTEX_BUFFER);
}
else {
/* no array, use ctx->Current.Attrib[] value */
- bytes = sizeof(ctx->Current.Attrib[0]);
+ uint bytes = sizeof(ctx->Current.Attrib[0]);
vbuffer[attr].buffer =
pipe_user_buffer_create(pipe->screen,
(void *) ctx->Current.Attrib[mesaAttr],
@@ -482,16 +410,15 @@ setup_non_interleaved_attribs(struct gl_context *ctx, }
vbuffer[attr].buffer_offset = 0;
- }
- assert(velements[attr].src_offset <= 2048); /* 11-bit field */
+ /* Track user vertex buffers. */
+ pipe_resource_reference(&st->user_vb[attr], vbuffer->buffer);
+ st->user_vb_stride[attr] = stride;
+ st->num_user_vbs = MAX2(st->num_user_vbs, attr+1);
+ }
/* common-case setup */
vbuffer[attr].stride = stride; /* in bytes */
- if (arrays[mesaAttr]->InstanceDivisor)
- vbuffer[attr].max_index = arrays[mesaAttr]->_MaxElement;
- else
- vbuffer[attr].max_index = max_index;
velements[attr].src_offset = 0;
velements[attr].instance_divisor = arrays[mesaAttr]->InstanceDivisor;
@@ -609,6 +536,62 @@ translate_prim(const struct gl_context *ctx, unsigned prim) }
+static void
+st_validate_varrays(struct gl_context *ctx,
+ const struct gl_client_array **arrays,
+ unsigned max_index)
+{
+ struct st_context *st = st_context(ctx);
+ const struct st_vertex_program *vp;
+ const struct st_vp_variant *vpv;
+ struct pipe_vertex_buffer vbuffer[PIPE_MAX_SHADER_INPUTS];
+ struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
+ unsigned num_vbuffers, num_velements;
+ GLuint attr;
+ unsigned i;
+
+ /* must get these after state validation! */
+ vp = st->vp;
+ vpv = st->vp_variant;
+
+ memset(velements, 0, sizeof(struct pipe_vertex_element) * vpv->num_inputs);
+
+ /* Unreference any user vertex buffers. */
+ for (i = 0; i < st->num_user_vbs; i++) {
+ pipe_resource_reference(&st->user_vb[i], NULL);
+ }
+ st->num_user_vbs = 0;
+
+ /*
+ * Setup the vbuffer[] and velements[] arrays.
+ */
+ if (is_interleaved_arrays(vp, vpv, arrays)) {
+ setup_interleaved_attribs(ctx, vp, vpv, arrays, vbuffer, velements,
+ max_index);
+ num_vbuffers = 1;
+ num_velements = vpv->num_inputs;
+ if (num_velements == 0)
+ num_vbuffers = 0;
+ }
+ else {
+ setup_non_interleaved_attribs(ctx, vp, vpv, arrays,
+ vbuffer, velements, max_index);
+ num_vbuffers = vpv->num_inputs;
+ num_velements = vpv->num_inputs;
+ }
+
+ cso_set_vertex_buffers(st->cso_context, num_vbuffers, vbuffer);
+ cso_set_vertex_elements(st->cso_context, num_velements, velements);
+
+ /* unreference buffers (frees wrapped user-space buffer objects)
+ * This is OK, because the pipe driver should reference buffers by itself
+ * in set_vertex_buffers. */
+ for (attr = 0; attr < num_vbuffers; attr++) {
+ pipe_resource_reference(&vbuffer[attr].buffer, NULL);
+ assert(!vbuffer[attr].buffer);
+ }
+}
+
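
setup_interleaved_attribs() above creates a single vertex buffer anchored at the lowest client address and expresses each element's src_offset relative to it. The following standalone illustration uses a made-up vertex layout; the struct and values are hypothetical, not taken from Mesa.

#include <stdio.h>

/* Hypothetical interleaved layout; the real data comes from gl_client_array. */
struct vertex {
   float pos[3];
   unsigned char color[4];
};

int main(void)
{
   struct vertex verts[4];
   const unsigned char *low   = (const unsigned char *) &verts[0];
   const unsigned char *pos   = (const unsigned char *) &verts[0].pos;
   const unsigned char *color = (const unsigned char *) &verts[0].color;

   printf("stride %u, pos offset %u, color offset %u\n",
          (unsigned) sizeof(struct vertex),
          (unsigned) (pos - low),           /* 0 */
          (unsigned) (color - low));        /* 12 on typical ABIs */
   return 0;
}
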
/**
* This function gets plugged into the VBO module and is called when
@@ -627,90 +610,79 @@ st_draw_vbo(struct gl_context *ctx, {
struct st_context *st = st_context(ctx);
struct pipe_context *pipe = st->pipe;
- const struct st_vertex_program *vp;
- const struct st_vp_variant *vpv;
- struct pipe_vertex_buffer vbuffer[PIPE_MAX_SHADER_INPUTS];
- GLuint attr;
- struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
- unsigned num_vbuffers, num_velements;
struct pipe_index_buffer ibuffer;
- GLboolean userSpace = GL_FALSE;
- GLboolean vertDataEdgeFlags;
struct pipe_draw_info info;
unsigned i;
+ GLboolean new_array =
+ st->dirty.st && (st->dirty.mesa & (_NEW_ARRAY | _NEW_PROGRAM)) != 0;
/* Mesa core state should have been validated already */
assert(ctx->NewState == 0x0);
- /* Gallium probably doesn't want this in some cases. */
- if (!index_bounds_valid)
- if (!vbo_all_varyings_in_vbos(arrays))
- vbo_get_minmax_index(ctx, prims, ib, &min_index, &max_index);
+ if (ib) {
+ /* Gallium probably doesn't want this in some cases. */
+ if (!index_bounds_valid)
+ if (!vbo_all_varyings_in_vbos(arrays))
+ vbo_get_minmax_index(ctx, prims, ib, &min_index, &max_index);
+ } else {
+ /* Get min/max index for non-indexed drawing. */
+ min_index = ~0;
+ max_index = 0;
+
+ for (i = 0; i < nr_prims; i++) {
+ min_index = MIN2(min_index, prims[i].start);
+ max_index = MAX2(max_index, prims[i].start + prims[i].count - 1);
+ }
+ }
- /* sanity check for pointer arithmetic below */
- assert(sizeof(arrays[0]->Ptr[0]) == 1);
+ /* Validate state. */
+ if (st->dirty.st) {
+ GLboolean vertDataEdgeFlags;
- vertDataEdgeFlags = arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj &&
- arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj->Name;
- if (vertDataEdgeFlags != st->vertdata_edgeflags) {
- st->vertdata_edgeflags = vertDataEdgeFlags;
- st->dirty.st |= ST_NEW_EDGEFLAGS_DATA;
- }
+ /* sanity check for pointer arithmetic below */
+ assert(sizeof(arrays[0]->Ptr[0]) == 1);
- st_validate_state(st);
+ vertDataEdgeFlags = arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj &&
+ arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj->Name;
+ if (vertDataEdgeFlags != st->vertdata_edgeflags) {
+ st->vertdata_edgeflags = vertDataEdgeFlags;
+ st->dirty.st |= ST_NEW_EDGEFLAGS_DATA;
+ }
- /* must get these after state validation! */
- vp = st->vp;
- vpv = st->vp_variant;
+ st_validate_state(st);
+
+ if (new_array) {
+ st_validate_varrays(ctx, arrays, max_index);
+ }
#if 0
- if (MESA_VERBOSE & VERBOSE_GLSL) {
- check_uniforms(ctx);
- }
+ if (MESA_VERBOSE & VERBOSE_GLSL) {
+ check_uniforms(ctx);
+ }
#else
- (void) check_uniforms;
+ (void) check_uniforms;
#endif
-
- memset(velements, 0, sizeof(struct pipe_vertex_element) * vpv->num_inputs);
- /*
- * Setup the vbuffer[] and velements[] arrays.
- */
- if (is_interleaved_arrays(vp, vpv, arrays, &userSpace)) {
- /*printf("Draw interleaved\n");*/
- setup_interleaved_attribs(ctx, vp, vpv, arrays, max_index, userSpace,
- vbuffer, velements);
- num_vbuffers = 1;
- num_velements = vpv->num_inputs;
- if (num_velements == 0)
- num_vbuffers = 0;
- }
- else {
- /*printf("Draw non-interleaved\n");*/
- setup_non_interleaved_attribs(ctx, vp, vpv, arrays, max_index,
- &userSpace, vbuffer, velements);
- num_vbuffers = vpv->num_inputs;
- num_velements = vpv->num_inputs;
}
-#if 0
- {
- GLuint i;
- for (i = 0; i < num_vbuffers; i++) {
- printf("buffers[%d].stride = %u\n", i, vbuffer[i].stride);
- printf("buffers[%d].max_index = %u\n", i, vbuffer[i].max_index);
- printf("buffers[%d].buffer_offset = %u\n", i, vbuffer[i].buffer_offset);
- printf("buffers[%d].buffer = %p\n", i, (void*) vbuffer[i].buffer);
- }
- for (i = 0; i < num_velements; i++) {
- printf("vlements[%d].vbuffer_index = %u\n", i, velements[i].vertex_buffer_index);
- printf("vlements[%d].src_offset = %u\n", i, velements[i].src_offset);
- printf("vlements[%d].format = %s\n", i, util_format_name(velements[i].src_format));
+ /* Notify the driver that the content of user buffers may have been
+ * changed. */
+ if (!new_array && st->num_user_vbs) {
+ for (i = 0; i < st->num_user_vbs; i++) {
+ if (st->user_vb[i]) {
+ unsigned stride = st->user_vb_stride[i];
+
+ if (stride) {
+ pipe->redefine_user_buffer(pipe, st->user_vb[i],
+ min_index * stride,
+ (max_index + 1 - min_index) * stride);
+ } else {
+ /* stride == 0 */
+ pipe->redefine_user_buffer(pipe, st->user_vb[i],
+ 0, st->user_vb[i]->width0);
+ }
+ }
}
}
-#endif
-
- pipe->set_vertex_buffers(pipe, num_vbuffers, vbuffer);
- cso_set_vertex_elements(st->cso_context, num_velements, velements);
setup_index_buffer(ctx, ib, &ibuffer);
pipe->set_index_buffer(pipe, &ibuffer);
@@ -744,17 +716,6 @@ st_draw_vbo(struct gl_context *ctx, }
pipe_resource_reference(&ibuffer.buffer, NULL);
-
- /* unreference buffers (frees wrapped user-space buffer objects) */
- for (attr = 0; attr < num_vbuffers; attr++) {
- pipe_resource_reference(&vbuffer[attr].buffer, NULL);
- assert(!vbuffer[attr].buffer);
- }
-
- if (userSpace)
- {
- pipe->set_vertex_buffers(pipe, 0, NULL);
- }
}
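
The redefine_user_buffer() calls above tell the driver which byte range of a user-space array may have changed since the last draw, computed from the index bounds and the per-buffer stride. A worked numeric example follows; all values are invented for illustration.

#include <stdio.h>

int main(void)
{
   /* assumed values: 24-byte stride, indices 10..99 referenced by the draw */
   unsigned stride = 24, min_index = 10, max_index = 99;

   unsigned offset = min_index * stride;                    /* 240  */
   unsigned size   = (max_index + 1 - min_index) * stride;  /* 2160 */

   printf("redefine range: offset %u, size %u\n", offset, size);
   return 0;
}
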
diff --git a/mesalib/src/mesa/state_tracker/st_draw_feedback.c b/mesalib/src/mesa/state_tracker/st_draw_feedback.c index 2ecb8e202..f0734eae0 100644 --- a/mesalib/src/mesa/state_tracker/st_draw_feedback.c +++ b/mesalib/src/mesa/state_tracker/st_draw_feedback.c @@ -179,7 +179,6 @@ st_feedback_draw_vbo(struct gl_context *ctx, /* common-case setup */
vbuffers[attr].stride = arrays[mesaAttr]->StrideB; /* in bytes */
- vbuffers[attr].max_index = max_index;
velements[attr].instance_divisor = 0;
velements[attr].vertex_buffer_index = attr;
velements[attr].src_format =
diff --git a/mesalib/src/mesa/state_tracker/st_gen_mipmap.c b/mesalib/src/mesa/state_tracker/st_gen_mipmap.c index 0be66a2c2..5fcd806e0 100644 --- a/mesalib/src/mesa/state_tracker/st_gen_mipmap.c +++ b/mesalib/src/mesa/state_tracker/st_gen_mipmap.c @@ -105,13 +105,12 @@ st_render_mipmap(struct st_context *st, static void
decompress_image(enum pipe_format format,
const uint8_t *src, uint8_t *dst,
- unsigned width, unsigned height)
+ unsigned width, unsigned height, unsigned src_stride)
{
const struct util_format_description *desc = util_format_description(format);
const uint bw = util_format_get_blockwidth(format);
const uint bh = util_format_get_blockheight(format);
const uint dst_stride = 4 * MAX2(width, bw);
- const uint src_stride = util_format_get_stride(format, width);
desc->unpack_rgba_8unorm(dst, dst_stride, src, src_stride, width, height);
@@ -144,10 +143,9 @@ decompress_image(enum pipe_format format, static void
compress_image(enum pipe_format format,
const uint8_t *src, uint8_t *dst,
- unsigned width, unsigned height)
+ unsigned width, unsigned height, unsigned dst_stride)
{
const struct util_format_description *desc = util_format_description(format);
- const uint dst_stride = util_format_get_stride(format, width);
const uint src_stride = 4 * width;
desc->pack_rgba_8unorm(dst, dst_stride, src, src_stride, width, height);
@@ -236,7 +234,7 @@ fallback_generate_mipmap(struct gl_context *ctx, GLenum target, dstTemp = malloc(dstWidth2 * dstHeight2 * comps + 000);
/* decompress the src image: srcData -> srcTemp */
- decompress_image(format, srcData, srcTemp, srcWidth, srcHeight);
+ decompress_image(format, srcData, srcTemp, srcWidth, srcHeight, srcTrans->stride);
_mesa_generate_mipmap_level(target, datatype, comps,
0 /*border*/,
@@ -248,7 +246,7 @@ fallback_generate_mipmap(struct gl_context *ctx, GLenum target, dstWidth2); /* stride in texels */
/* compress the new image: dstTemp -> dstData */
- compress_image(format, dstTemp, dstData, dstWidth, dstHeight);
+ compress_image(format, dstTemp, dstData, dstWidth, dstHeight, dstTrans->stride);
free(srcTemp);
free(dstTemp);
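
The extra stride parameter added to decompress_image() and compress_image() above matters because the driver's transfer may pad each row of compressed blocks beyond the tightly packed stride derived from the width alone. A short standalone computation, with DXT1-style block parameters assumed purely for illustration:

#include <stdio.h>

int main(void)
{
   /* assumed DXT1-like parameters: 4x4 blocks, 8 bytes per block */
   unsigned block_width = 4, block_bytes = 8, width = 7;

   unsigned packed_stride = ((width + block_width - 1) / block_width) * block_bytes; /* 16 */
   unsigned transfer_stride = 32;  /* hypothetical padded pitch reported by the driver */

   printf("packed %u vs transfer %u bytes per block row\n",
          packed_stride, transfer_stride);
   return 0;
}
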
diff --git a/mesalib/src/mesa/tnl/t_draw.c b/mesalib/src/mesa/tnl/t_draw.c index 741f0ed3f..a2ca6225b 100644 --- a/mesalib/src/mesa/tnl/t_draw.c +++ b/mesalib/src/mesa/tnl/t_draw.c @@ -125,6 +125,43 @@ convert_half_to_float(const struct gl_client_array *input, }
}
+/**
+ * \brief Convert fixed-point to floating-point.
+ *
+ * In OpenGL, a fixed-point number is a "signed 2's complement 16.16 scaled
+ * integer" (Table 2.2 of the OpenGL ES 2.0 spec).
+ *
+ * If the buffer has the \c normalized flag set, the formula
+ * \code normalize(x) := (2*x + 1) / (2^16 - 1) \endcode
+ * is used to map the fixed-point numbers into the range [-1, 1].
+ */
+static void
+convert_fixed_to_float(const struct gl_client_array *input,
+ const GLubyte *ptr, GLfloat *fptr,
+ GLuint count)
+{
+ GLuint i, j;
+ const GLint size = input->Size;
+
+ if (input->Normalized) {
+ for (i = 0; i < count; ++i) {
+ const GLfixed *in = (GLfixed *) ptr;
+ for (j = 0; j < size; ++j) {
+ *fptr++ = (GLfloat) (2 * in[j] + 1) / (GLfloat) ((1 << 16) - 1);
+ }
+ ptr += input->StrideB;
+ }
+ } else {
+ for (i = 0; i < count; ++i) {
+ const GLfixed *in = (GLfixed *) ptr;
+ for (j = 0; j < size; ++j) {
+ *fptr++ = in[j] / (GLfloat) (1 << 16);
+ }
+ ptr += input->StrideB;
+ }
+ }
+}
+
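
A tiny standalone check of the 16.16 conversion that the new convert_fixed_to_float() performs; the input value is chosen for illustration, and the normalized path applies the (2*x + 1) / (2^16 - 1) formula quoted in the comment above.

#include <stdio.h>

int main(void)
{
   int fixed = 0x00018000;                                   /* 1.5 in signed 16.16 */
   float plain = fixed / (float) (1 << 16);                  /* un-normalized path: 1.5f */
   float norm  = (2.0f * fixed + 1.0f) / (float) ((1 << 16) - 1);

   printf("%f %f\n", plain, norm);
   return 0;
}
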
/* Adjust pointer to point at first requested element, convert to
* floating point, populate VB->AttribPtr[].
*/
@@ -174,6 +211,9 @@ static void _tnl_import_array( struct gl_context *ctx, case GL_HALF_FLOAT:
convert_half_to_float(input, ptr, fptr, count, sz);
break;
+ case GL_FIXED:
+ convert_fixed_to_float(input, ptr, fptr, count);
+ break;
default:
assert(0);
break;
diff --git a/mesalib/src/mesa/vbo/vbo_exec_array.c b/mesalib/src/mesa/vbo/vbo_exec_array.c index 13b54d59c..4304985aa 100644 --- a/mesalib/src/mesa/vbo/vbo_exec_array.c +++ b/mesalib/src/mesa/vbo/vbo_exec_array.c @@ -502,8 +502,13 @@ recalculate_input_bindings(struct gl_context *ctx) static void
bind_arrays(struct gl_context *ctx)
{
+ if (!ctx->Array.RebindArrays) {
+ return;
+ }
+
bind_array_obj(ctx);
recalculate_input_bindings(ctx);
+ ctx->Array.RebindArrays = GL_FALSE;
}
diff --git a/mesalib/src/mesa/vbo/vbo_exec_draw.c b/mesalib/src/mesa/vbo/vbo_exec_draw.c index 048f3d170..a2a098857 100644 --- a/mesalib/src/mesa/vbo/vbo_exec_draw.c +++ b/mesalib/src/mesa/vbo/vbo_exec_draw.c @@ -245,6 +245,7 @@ vbo_exec_bind_arrays( struct gl_context *ctx ) arrays[attr]._MaxElement = count; /* ??? */
varying_inputs |= 1 << attr;
+ ctx->NewState |= _NEW_ARRAY;
}
}
diff --git a/mesalib/src/mesa/vbo/vbo_save_draw.c b/mesalib/src/mesa/vbo/vbo_save_draw.c index 6d8dbdb86..5a19b0d62 100644 --- a/mesalib/src/mesa/vbo/vbo_save_draw.c +++ b/mesalib/src/mesa/vbo/vbo_save_draw.c @@ -202,6 +202,7 @@ static void vbo_bind_vertex_list(struct gl_context *ctx, buffer_offset += node->attrsz[src] * sizeof(GLfloat);
varying_inputs |= 1<<attr;
+ ctx->NewState |= _NEW_ARRAY;
}
}
diff --git a/pixman/Makefile.am b/pixman/Makefile.am index 63b08c1fb..b12b212fd 100644 --- a/pixman/Makefile.am +++ b/pixman/Makefile.am @@ -1,132 +1,132 @@ -SUBDIRS = pixman test - -pkgconfigdir=$(libdir)/pkgconfig -pkgconfig_DATA=pixman-1.pc - -$(pkgconfig_DATA): pixman-1.pc.in - -snapshot: - distdir="$(distdir)-`date '+%Y%m%d'`"; \ - test -d "$(srcdir)/.git" && distdir=$$distdir-`cd "$(srcdir)" && git rev-parse HEAD | cut -c 1-6`; \ - $(MAKE) $(AM_MAKEFLAGS) distdir="$$distdir" dist - -GPGKEY=6FF7C1A8 -USERNAME=$$USER -RELEASE_OR_SNAPSHOT = $$(if test "x$(CAIRO_VERSION_MINOR)" = "x$$(echo "$(CAIRO_VERSION_MINOR)/2*2" | bc)" ; then echo release; else echo snapshot; fi) -RELEASE_CAIRO_HOST = $(USERNAME)@cairographics.org -RELEASE_CAIRO_DIR = /srv/cairo.freedesktop.org/www/releases -RELEASE_CAIRO_URL = http://cairographics.org/releases -RELEASE_XORG_URL = http://xorg.freedesktop.org/archive/individual/lib -RELEASE_XORG_HOST = $(USERNAME)@xorg.freedesktop.org -RELEASE_XORG_DIR = /srv/xorg.freedesktop.org/archive/individual/lib -RELEASE_ANNOUNCE_LIST = cairo-announce@cairographics.org, xorg-announce@lists.freedesktop.org - -tar_gz = $(PACKAGE)-$(VERSION).tar.gz -tar_bz2 = $(PACKAGE)-$(VERSION).tar.bz2 - -sha1_tgz = $(tar_gz).sha1 -md5_tgz = $(tar_gz).md5 - -sha1_tbz2 = $(tar_bz2).sha1 -md5_tbz2 = $(tar_bz2).md5 - -gpg_file = $(sha1_tgz).asc - -$(sha1_tgz): $(tar_gz) - sha1sum $^ > $@ - -$(md5_tgz): $(tar_gz) - md5sum $^ > $@ - -$(sha1_tbz2): $(tar_bz2) - sha1sum $^ > $@ - -$(md5_tbz2): $(tar_bz2) - md5sum $^ > $@ - -$(gpg_file): $(sha1_tgz) - @echo "Please enter your GPG password to sign the checksum." - gpg --armor --sign $^ - -HASHFILES = $(sha1_tgz) $(sha1_tbz2) $(md5_tgz) $(md5_tbz2) - -release-verify-newer: - @echo -n "Checking that no $(VERSION) release already exists at $(RELEASE_XORG_HOST)..." - @ssh $(RELEASE_XORG_HOST) test ! -e $(RELEASE_XORG_DIR)/$(tar_gz) \ - || (echo "Ouch." && echo "Found: $(RELEASE_XORG_HOST):$(RELEASE_XORG_DIR)/$(tar_gz)" \ - && echo "Refusing to try to generate a new release of the same name." \ - && false) - @ssh $(RELEASE_CAIRO_HOST) test ! -e $(RELEASE_CAIRO_DIR)/$(tar_gz) \ - || (echo "Ouch." && echo "Found: $(RELEASE_CAIRO_HOST):$(RELEASE_CAIRO_DIR)/$(tar_gz)" \ - && echo "Refusing to try to generate a new release of the same name." \ - && false) - @echo "Good." - -release-remove-old: - $(RM) $(tar_gz) $(tar_bz2) $(HASHFILES) $(gpg_file) - -ensure-prev: - @if [[ "$(PREV)" == "" ]]; then \ - echo "" && \ - echo "You must set the PREV variable on the make command line to" && \ - echo "the last version." && \ - echo "" && \ - echo "For example:" && \ - echo " make PREV=0.7.3" && \ - echo "" && \ - false; \ - fi - -release-check: ensure-prev release-verify-newer release-remove-old distcheck - -release-tag: - git tag -u $(GPGKEY) -m "$(PACKAGE) $(VERSION) release" $(PACKAGE)-$(VERSION) - -release-upload: release-check $(tar_gz) $(tar_bz2) $(sha1_tgz) $(sha1_tbz2) $(md5_tgz) $(gpg_file) - mkdir -p releases - scp $(tar_gz) $(sha1_tgz) $(gpg_file) $(RELEASE_CAIRO_HOST):$(RELEASE_CAIRO_DIR) - scp $(tar_gz) $(tar_bz2) $(RELEASE_XORG_HOST):$(RELEASE_XORG_DIR) - ssh $(RELEASE_CAIRO_HOST) "rm -f $(RELEASE_CAIRO_DIR)/LATEST-$(PACKAGE)-[0-9]* && ln -s $(tar_gz) $(RELEASE_CAIRO_DIR)/LATEST-$(PACKAGE)-$(VERSION)" - -release-publish-message: $(HASHFILES) ensure-prev - @echo "Please follow the instructions in RELEASING to push stuff out and" - @echo "send out the announcement mails. 
Here is the excerpt you need:" - @echo "" - @echo "Lists: $(RELEASE_ANNOUNCE_LIST)" - @echo "Subject: [ANNOUNCE] $(PACKAGE) release $(VERSION) now available" - @echo "============================== CUT HERE ==============================" - @echo "A new $(PACKAGE) release $(VERSION) is now available" - @echo "" - @echo "tar.gz:" - @echo " $(RELEASE_CAIRO_URL)/$(tar_gz)" - @echo " $(RELEASE_XORG_URL)/$(tar_gz)" - @echo "" - @echo "tar.bz2:" - @echo " $(RELEASE_XORG_URL)/$(tar_bz2)" - @echo "" - @echo "Hashes:" - @echo -n " MD5: " - @cat $(md5_tgz) - @echo -n " MD5: " - @cat $(md5_tbz2) - @echo -n " SHA1: " - @cat $(sha1_tgz) - @echo -n " SHA1: " - @cat $(sha1_tbz2) - @echo "" - @echo "GPG signature:" - @echo " $(RELEASE_CAIRO_URL)/$(gpg_file)" - @echo " (signed by `git config --get user.name` <`git config --get user.email`>)" - @echo "" - @echo "Git:" - @echo " git://git.freedesktop.org/git/pixman" - @echo " tag: $(PACKAGE)-$(VERSION)" - @echo "" - @echo "Log:" - @git log --no-merges "$(PACKAGE)-$(PREV)".."$(PACKAGE)-$(VERSION)" | git shortlog | awk '{ printf "\t"; print ; }' | cut -b1-80 - @echo "============================== CUT HERE ==============================" - @echo "" - -release-publish: release-upload release-tag release-publish-message - -.PHONY: release-upload release-publish release-publish-message release-tag +SUBDIRS = pixman test demos
+
+pkgconfigdir=$(libdir)/pkgconfig
+pkgconfig_DATA=pixman-1.pc
+
+$(pkgconfig_DATA): pixman-1.pc.in
+
+snapshot:
+ distdir="$(distdir)-`date '+%Y%m%d'`"; \
+ test -d "$(srcdir)/.git" && distdir=$$distdir-`cd "$(srcdir)" && git rev-parse HEAD | cut -c 1-6`; \
+ $(MAKE) $(AM_MAKEFLAGS) distdir="$$distdir" dist
+
+GPGKEY=6FF7C1A8
+USERNAME=$$USER
+RELEASE_OR_SNAPSHOT = $$(if test "x$(CAIRO_VERSION_MINOR)" = "x$$(echo "$(CAIRO_VERSION_MINOR)/2*2" | bc)" ; then echo release; else echo snapshot; fi)
+RELEASE_CAIRO_HOST = $(USERNAME)@cairographics.org
+RELEASE_CAIRO_DIR = /srv/cairo.freedesktop.org/www/releases
+RELEASE_CAIRO_URL = http://cairographics.org/releases
+RELEASE_XORG_URL = http://xorg.freedesktop.org/archive/individual/lib
+RELEASE_XORG_HOST = $(USERNAME)@xorg.freedesktop.org
+RELEASE_XORG_DIR = /srv/xorg.freedesktop.org/archive/individual/lib
+RELEASE_ANNOUNCE_LIST = cairo-announce@cairographics.org, xorg-announce@lists.freedesktop.org
+
+tar_gz = $(PACKAGE)-$(VERSION).tar.gz
+tar_bz2 = $(PACKAGE)-$(VERSION).tar.bz2
+
+sha1_tgz = $(tar_gz).sha1
+md5_tgz = $(tar_gz).md5
+
+sha1_tbz2 = $(tar_bz2).sha1
+md5_tbz2 = $(tar_bz2).md5
+
+gpg_file = $(sha1_tgz).asc
+
+$(sha1_tgz): $(tar_gz)
+ sha1sum $^ > $@
+
+$(md5_tgz): $(tar_gz)
+ md5sum $^ > $@
+
+$(sha1_tbz2): $(tar_bz2)
+ sha1sum $^ > $@
+
+$(md5_tbz2): $(tar_bz2)
+ md5sum $^ > $@
+
+$(gpg_file): $(sha1_tgz)
+ @echo "Please enter your GPG password to sign the checksum."
+ gpg --armor --sign $^
+
+HASHFILES = $(sha1_tgz) $(sha1_tbz2) $(md5_tgz) $(md5_tbz2)
+
+release-verify-newer:
+ @echo -n "Checking that no $(VERSION) release already exists at $(RELEASE_XORG_HOST)..."
+ @ssh $(RELEASE_XORG_HOST) test ! -e $(RELEASE_XORG_DIR)/$(tar_gz) \
+ || (echo "Ouch." && echo "Found: $(RELEASE_XORG_HOST):$(RELEASE_XORG_DIR)/$(tar_gz)" \
+ && echo "Refusing to try to generate a new release of the same name." \
+ && false)
+ @ssh $(RELEASE_CAIRO_HOST) test ! -e $(RELEASE_CAIRO_DIR)/$(tar_gz) \
+ || (echo "Ouch." && echo "Found: $(RELEASE_CAIRO_HOST):$(RELEASE_CAIRO_DIR)/$(tar_gz)" \
+ && echo "Refusing to try to generate a new release of the same name." \
+ && false)
+ @echo "Good."
+
+release-remove-old:
+ $(RM) $(tar_gz) $(tar_bz2) $(HASHFILES) $(gpg_file)
+
+ensure-prev:
+ @if [[ "$(PREV)" == "" ]]; then \
+ echo "" && \
+ echo "You must set the PREV variable on the make command line to" && \
+ echo "the last version." && \
+ echo "" && \
+ echo "For example:" && \
+ echo " make PREV=0.7.3" && \
+ echo "" && \
+ false; \
+ fi
+
+release-check: ensure-prev release-verify-newer release-remove-old distcheck
+
+release-tag:
+ git tag -u $(GPGKEY) -m "$(PACKAGE) $(VERSION) release" $(PACKAGE)-$(VERSION)
+
+release-upload: release-check $(tar_gz) $(tar_bz2) $(sha1_tgz) $(sha1_tbz2) $(md5_tgz) $(gpg_file)
+ mkdir -p releases
+ scp $(tar_gz) $(sha1_tgz) $(gpg_file) $(RELEASE_CAIRO_HOST):$(RELEASE_CAIRO_DIR)
+ scp $(tar_gz) $(tar_bz2) $(RELEASE_XORG_HOST):$(RELEASE_XORG_DIR)
+ ssh $(RELEASE_CAIRO_HOST) "rm -f $(RELEASE_CAIRO_DIR)/LATEST-$(PACKAGE)-[0-9]* && ln -s $(tar_gz) $(RELEASE_CAIRO_DIR)/LATEST-$(PACKAGE)-$(VERSION)"
+
+release-publish-message: $(HASHFILES) ensure-prev
+ @echo "Please follow the instructions in RELEASING to push stuff out and"
+ @echo "send out the announcement mails. Here is the excerpt you need:"
+ @echo ""
+ @echo "Lists: $(RELEASE_ANNOUNCE_LIST)"
+ @echo "Subject: [ANNOUNCE] $(PACKAGE) release $(VERSION) now available"
+ @echo "============================== CUT HERE =============================="
+ @echo "A new $(PACKAGE) release $(VERSION) is now available"
+ @echo ""
+ @echo "tar.gz:"
+ @echo " $(RELEASE_CAIRO_URL)/$(tar_gz)"
+ @echo " $(RELEASE_XORG_URL)/$(tar_gz)"
+ @echo ""
+ @echo "tar.bz2:"
+ @echo " $(RELEASE_XORG_URL)/$(tar_bz2)"
+ @echo ""
+ @echo "Hashes:"
+ @echo -n " MD5: "
+ @cat $(md5_tgz)
+ @echo -n " MD5: "
+ @cat $(md5_tbz2)
+ @echo -n " SHA1: "
+ @cat $(sha1_tgz)
+ @echo -n " SHA1: "
+ @cat $(sha1_tbz2)
+ @echo ""
+ @echo "GPG signature:"
+ @echo " $(RELEASE_CAIRO_URL)/$(gpg_file)"
+ @echo " (signed by `git config --get user.name` <`git config --get user.email`>)"
+ @echo ""
+ @echo "Git:"
+ @echo " git://git.freedesktop.org/git/pixman"
+ @echo " tag: $(PACKAGE)-$(VERSION)"
+ @echo ""
+ @echo "Log:"
+ @git log --no-merges "$(PACKAGE)-$(PREV)".."$(PACKAGE)-$(VERSION)" | git shortlog | awk '{ printf "\t"; print ; }' | cut -b1-80
+ @echo "============================== CUT HERE =============================="
+ @echo ""
+
+release-publish: release-upload release-tag release-publish-message
+
+.PHONY: release-upload release-publish release-publish-message release-tag
diff --git a/pixman/configure.ac b/pixman/configure.ac index ab2ecde1b..189cc0667 100644 --- a/pixman/configure.ac +++ b/pixman/configure.ac @@ -1,820 +1,821 @@ -dnl Copyright 2005 Red Hat, Inc. -dnl -dnl Permission to use, copy, modify, distribute, and sell this software and its -dnl documentation for any purpose is hereby granted without fee, provided that -dnl the above copyright notice appear in all copies and that both that -dnl copyright notice and this permission notice appear in supporting -dnl documentation, and that the name of Red Hat not be used in -dnl advertising or publicity pertaining to distribution of the software without -dnl specific, written prior permission. Red Hat makes no -dnl representations about the suitability of this software for any purpose. It -dnl is provided "as is" without express or implied warranty. -dnl -dnl RED HAT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, -dnl INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO -dnl EVENT SHALL RED HAT BE LIABLE FOR ANY SPECIAL, INDIRECT OR -dnl CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, -dnl DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -dnl TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -dnl PERFORMANCE OF THIS SOFTWARE. -dnl -dnl Process this file with autoconf to create configure. - -AC_PREREQ([2.57]) - -# Pixman versioning scheme -# -# - The version in git has an odd MICRO version number -# -# - Released versions both development and stable have an even MICRO -# version number -# -# - Released development versions have an odd MINOR number -# -# - Released stable versions have an even MINOR number -# -# - Versions that break ABI must have a new MAJOR number -# -# - If you break the ABI, then at least this must be done: -# -# - increment MAJOR -# -# - In the first development release where you break ABI, find -# all instances of "pixman-n" and change them to pixman-(n+1) -# -# This needs to be done at least in -# configure.ac -# all Makefile.am's -# pixman-n.pc.in -# -# This ensures that binary incompatible versions can be installed -# in parallel. See http://www106.pair.com/rhp/parallel.html for -# more information -# - -m4_define([pixman_major], 0) -m4_define([pixman_minor], 21) -m4_define([pixman_micro], 5) - -m4_define([pixman_version],[pixman_major.pixman_minor.pixman_micro]) - -AC_INIT(pixman, pixman_version, [pixman@lists.freedesktop.org], pixman) -AM_INIT_AUTOMAKE([foreign dist-bzip2]) - -# Suppress verbose compile lines -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -AM_CONFIG_HEADER(config.h) - -AC_CANONICAL_HOST - -test_CFLAGS=${CFLAGS+set} # We may override autoconf default CFLAGS. - -AC_PROG_CC -AM_PROG_AS -AC_PROG_LIBTOOL -AC_CHECK_FUNCS([getisax]) -AC_C_BIGENDIAN -AC_C_INLINE - -dnl PIXMAN_LINK_WITH_ENV(env-setup, program, true-action, false-action) -dnl -dnl Compiles and links the given program in the environment setup by env-setup -dnl and executes true-action on success and false-action on failure. 
[The remainder of the previous configure.ac is removed here; the replacement version follows in full as the "+" lines below.]
+dnl Copyright 2005 Red Hat, Inc.
+dnl
+dnl Permission to use, copy, modify, distribute, and sell this software and its
+dnl documentation for any purpose is hereby granted without fee, provided that
+dnl the above copyright notice appear in all copies and that both that
+dnl copyright notice and this permission notice appear in supporting
+dnl documentation, and that the name of Red Hat not be used in
+dnl advertising or publicity pertaining to distribution of the software without
+dnl specific, written prior permission. Red Hat makes no
+dnl representations about the suitability of this software for any purpose. It
+dnl is provided "as is" without express or implied warranty.
+dnl
+dnl RED HAT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+dnl INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+dnl EVENT SHALL RED HAT BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+dnl CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+dnl DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+dnl TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+dnl PERFORMANCE OF THIS SOFTWARE.
+dnl
+dnl Process this file with autoconf to create configure.
+
+AC_PREREQ([2.57])
+
+# Pixman versioning scheme
+#
+# - The version in git has an odd MICRO version number
+#
+# - Released versions both development and stable have an even MICRO
+# version number
+#
+# - Released development versions have an odd MINOR number
+#
+# - Released stable versions have an even MINOR number
+#
+# - Versions that break ABI must have a new MAJOR number
+#
+# - If you break the ABI, then at least this must be done:
+#
+# - increment MAJOR
+#
+# - In the first development release where you break ABI, find
+# all instances of "pixman-n" and change them to pixman-(n+1)
+#
+# This needs to be done at least in
+# configure.ac
+# all Makefile.am's
+# pixman-n.pc.in
+#
+# This ensures that binary incompatible versions can be installed
+# in parallel. See http://www106.pair.com/rhp/parallel.html for
+# more information
+#
+
+m4_define([pixman_major], 0)
+m4_define([pixman_minor], 21)
+m4_define([pixman_micro], 5)
+
+m4_define([pixman_version],[pixman_major.pixman_minor.pixman_micro])
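Worked example of the scheme above, using the numbers just defined: 0.21.5 has an odd micro version (5), so it is a snapshot taken from git rather than a release, and its odd minor version (21) marks the 0.21 series as a development series; the corresponding stable series is 0.20.x.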
+
+AC_INIT(pixman, pixman_version, [pixman@lists.freedesktop.org], pixman)
+AM_INIT_AUTOMAKE([foreign dist-bzip2])
+
+# Suppress verbose compile lines
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+
+AM_CONFIG_HEADER(config.h)
+
+AC_CANONICAL_HOST
+
+test_CFLAGS=${CFLAGS+set} # We may override autoconf default CFLAGS.
+
+AC_PROG_CC
+AM_PROG_AS
+AC_PROG_LIBTOOL
+AC_CHECK_FUNCS([getisax])
+AC_C_BIGENDIAN
+AC_C_INLINE
+
+dnl PIXMAN_LINK_WITH_ENV(env-setup, program, true-action, false-action)
+dnl
+dnl Compiles and links the given program in the environment setup by env-setup
+dnl and executes true-action on success and false-action on failure.
+AC_DEFUN([PIXMAN_LINK_WITH_ENV],[dnl
+ save_CFLAGS="$CFLAGS"
+ save_LDFLAGS="$LDFLAGS"
+ save_LIBS="$LIBS"
+ CFLAGS=""
+ LDFLAGS=""
+ LIBS=""
+ $1
+ AC_LINK_IFELSE(
+ [$2],
+ [pixman_cc_stderr=`test -f conftest.err && cat conftest.err`
+ pixman_cc_flag=yes],
+ [pixman_cc_stderr=`test -f conftest.err && cat conftest.err`
+ pixman_cc_flag=no])
+
+ if test "x$pixman_cc_stderr" != "x"; then
+ pixman_cc_flag=no
+ fi
+
+ if test "x$pixman_cc_flag" = "xyes"; then
+ ifelse([$3], , :, [$3])
+ else
+ ifelse([$4], , :, [$4])
+ fi
+ CFLAGS="$save_CFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+ LIBS="$save_LIBS"
+])
+
+dnl Find a -Werror for catching warnings.
+WERROR=
+for w in -Werror -errwarn; do
+ if test "z$WERROR" = "z"; then
+ AC_MSG_CHECKING([whether the compiler supports $w])
+ PIXMAN_LINK_WITH_ENV(
+ [CFLAGS=$w],
+ [int main(int c, char **v) { (void)c; (void)v; return 0; }],
+ [WERROR=$w; yesno=yes], [yesno=no])
+ AC_MSG_RESULT($yesno)
+ fi
+done
+
+dnl PIXMAN_CHECK_CFLAG(flag, [program])
+dnl Adds flag to CFLAGS if the given program links without warnings or errors.
+AC_DEFUN([PIXMAN_CHECK_CFLAG], [dnl
+ AC_MSG_CHECKING([whether the compiler supports $1])
+ PIXMAN_LINK_WITH_ENV(
+ [CFLAGS="$WERROR $1"],
+ [$2
+ int main(int c, char **v) { (void)c; (void)v; return 0; }
+ ],
+ [_yesno=yes],
+ [_yesno=no])
+ if test "x$_yesno" = xyes; then
+ CFLAGS="$CFLAGS $1"
+ fi
+ AC_MSG_RESULT($_yesno)
+])
+
+AC_CHECK_SIZEOF(long)
+
+# Checks for Sun Studio compilers
+AC_CHECK_DECL([__SUNPRO_C], [SUNCC="yes"], [SUNCC="no"])
+AC_CHECK_DECL([__amd64], [AMD64_ABI="yes"], [AMD64_ABI="no"])
+
+# Default CFLAGS to -O -g rather than just the -g from AC_PROG_CC
+# if we're using Sun Studio and neither the user nor a config.site
+# has set CFLAGS.
+if test $SUNCC = yes && \
+ test "$test_CFLAGS" == "" && \
+ test "$CFLAGS" = "-g"
+then
+ CFLAGS="-O -g"
+fi
+
+#
+# We ignore pixman_major in the version here because the major version should
+# always be encoded in the actual library name. Ie., the soname is:
+#
+# pixman-$(pixman_major).0.minor.micro
+#
+m4_define([lt_current], [pixman_minor])
+m4_define([lt_revision], [pixman_micro])
+m4_define([lt_age], [pixman_minor])
+
+LT_VERSION_INFO="lt_current:lt_revision:lt_age"
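With the definitions above, lt_current and lt_age both expand to pixman_minor (21) and lt_revision to pixman_micro (5), so libtool is handed the version triple 21:5:21.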
+
+PIXMAN_VERSION_MAJOR=pixman_major()
+AC_SUBST(PIXMAN_VERSION_MAJOR)
+PIXMAN_VERSION_MINOR=pixman_minor()
+AC_SUBST(PIXMAN_VERSION_MINOR)
+PIXMAN_VERSION_MICRO=pixman_micro()
+AC_SUBST(PIXMAN_VERSION_MICRO)
+
+AC_SUBST(LT_VERSION_INFO)
+
+# Check for dependencies
+
+PIXMAN_CHECK_CFLAG([-Wall])
+PIXMAN_CHECK_CFLAG([-fno-strict-aliasing])
+
+AC_PATH_PROG(PERL, perl, no)
+if test "x$PERL" = xno; then
+ AC_MSG_ERROR([Perl is required to build pixman.])
+fi
+AC_SUBST(PERL)
+
+dnl =========================================================================
+dnl OpenMP for the test suite?
+dnl
+
+# Check for OpenMP support (only supported by autoconf >=2.62)
+OPENMP_CFLAGS=
+m4_ifdef([AC_OPENMP], [AC_OPENMP])
+
+m4_define([openmp_test_program],[dnl
+#include <stdio.h>
+
+extern unsigned int lcg_seed;
+#pragma omp threadprivate(lcg_seed)
+unsigned int lcg_seed;
+
+unsigned function(unsigned a, unsigned b)
+{
+ lcg_seed ^= b;
+ return ((a + b) ^ a ) + lcg_seed;
+}
+
+int main(int argc, char **argv)
+{
+ int i;
+ int n1 = 0, n2 = argc;
+ unsigned checksum = 0;
+ int verbose = argv != NULL;
+ unsigned (*test_function)(unsigned, unsigned);
+ test_function = function;
+ #pragma omp parallel for reduction(+:checksum) default(none) \
+ shared(n1, n2, test_function, verbose)
+ for (i = n1; i < n2; i++)
+ {
+ unsigned crc = test_function (i, 0);
+ if (verbose)
+ printf ("%d: %08X\n", i, crc);
+ checksum += crc;
+ }
+ printf("%u\n", checksum);
+ return 0;
+}
+])
+
+PIXMAN_LINK_WITH_ENV(
+ [CFLAGS="$OPENMP_CFLAGS" LDFLAGS="$OPENMP_CFLAGS"],
+ [openmp_test_program],
+ [have_openmp=yes],
+ [have_openmp=no])
+if test "x$have_openmp" = "xyes"; then
+ AC_DEFINE(USE_OPENMP, 1, [use OpenMP in the test suite])
+else
+ OPENMP_CFLAGS=""
+fi
+AC_SUBST(OPENMP_CFLAGS)
+
+dnl =========================================================================
+dnl -fvisibility stuff
+
+PIXMAN_CHECK_CFLAG([-fvisibility=hidden], [dnl
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#ifdef _WIN32
+#error Have -fvisibility but it is ignored and generates a warning
+#endif
+#else
+error Need GCC 4.0 for visibility
+#endif
+])
+
+PIXMAN_CHECK_CFLAG([-xldscope=hidden], [dnl
+#if defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550)
+#else
+error Need Sun Studio 8 for visibility
+#endif
+])
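For context, a minimal sketch (not part of this patch; the PIXMAN_EXPORT name is only an illustrative assumption) of how a library compiled with -fvisibility=hidden typically re-exports its public entry points:

    /* With -fvisibility=hidden on the command line every symbol defaults to
     * hidden, so public API functions are marked explicitly. */
    #if defined(__GNUC__) && __GNUC__ >= 4
    #define PIXMAN_EXPORT __attribute__ ((visibility ("default")))
    #else
    #define PIXMAN_EXPORT
    #endif

    PIXMAN_EXPORT int
    pixman_example_public_function (void)
    {
        return 0;
    }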
+
+dnl ===========================================================================
+dnl Check for MMX
+
+if test "x$MMX_CFLAGS" = "x" ; then
+ if test "x$SUNCC" = "xyes"; then
+ # Sun Studio doesn't have an -xarch=mmx flag, so we have to use sse
+ # but if we're building 64-bit, mmx & sse support is on by default and
+ # -xarch=sse throws an error instead
+ if test "$AMD64_ABI" = "no" ; then
+ MMX_CFLAGS="-xarch=sse"
+ fi
+ else
+ MMX_CFLAGS="-mmmx -Winline"
+ fi
+fi
+
+have_mmx_intrinsics=no
+AC_MSG_CHECKING(whether to use MMX intrinsics)
+xserver_save_CFLAGS=$CFLAGS
+CFLAGS="$MMX_CFLAGS $CFLAGS"
+AC_COMPILE_IFELSE([
+#if defined(__GNUC__) && (__GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4))
+error "Need GCC >= 3.4 for MMX intrinsics"
+#endif
+#include <mmintrin.h>
+int main () {
+ __m64 v = _mm_cvtsi32_si64 (1);
+ return _mm_cvtsi64_si32 (v);
+}], have_mmx_intrinsics=yes)
+CFLAGS=$xserver_save_CFLAGS
+
+AC_ARG_ENABLE(mmx,
+ [AC_HELP_STRING([--disable-mmx],
+ [disable MMX fast paths])],
+ [enable_mmx=$enableval], [enable_mmx=auto])
+
+if test $enable_mmx = no ; then
+ have_mmx_intrinsics=disabled
+fi
+
+if test $have_mmx_intrinsics = yes ; then
+ AC_DEFINE(USE_MMX, 1, [use MMX compiler intrinsics])
+else
+ MMX_CFLAGS=
+fi
+
+AC_MSG_RESULT($have_mmx_intrinsics)
+if test $enable_mmx = yes && test $have_mmx_intrinsics = no ; then
+ AC_MSG_ERROR([MMX intrinsics not detected])
+fi
+
+AM_CONDITIONAL(USE_MMX, test $have_mmx_intrinsics = yes)
+
+dnl ===========================================================================
+dnl Check for SSE2
+
+if test "x$SSE2_CFLAGS" = "x" ; then
+ if test "x$SUNCC" = "xyes"; then
+ # SSE2 is enabled by default in the Sun Studio 64-bit environment
+ if test "$AMD64_ABI" = "no" ; then
+ SSE2_CFLAGS="-xarch=sse2"
+ fi
+ else
+ SSE2_CFLAGS="-mmmx -msse2 -Winline"
+ fi
+fi
+
+have_sse2_intrinsics=no
+AC_MSG_CHECKING(whether to use SSE2 intrinsics)
+xserver_save_CFLAGS=$CFLAGS
+CFLAGS="$SSE2_CFLAGS $CFLAGS"
+
+AC_COMPILE_IFELSE([
+#if defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2))
+# if !defined(__amd64__) && !defined(__x86_64__)
+# error "Need GCC >= 4.2 for SSE2 intrinsics on x86"
+# endif
+#endif
+#include <mmintrin.h>
+#include <xmmintrin.h>
+#include <emmintrin.h>
+int main () {
+ __m128i a = _mm_set1_epi32 (0), b = _mm_set1_epi32 (0), c;
+ c = _mm_xor_si128 (a, b);
+ return 0;
+}], have_sse2_intrinsics=yes)
+CFLAGS=$xserver_save_CFLAGS
+
+AC_ARG_ENABLE(sse2,
+ [AC_HELP_STRING([--disable-sse2],
+ [disable SSE2 fast paths])],
+ [enable_sse2=$enableval], [enable_sse2=auto])
+
+if test $enable_sse2 = no ; then
+ have_sse2_intrinsics=disabled
+fi
+
+if test $have_sse2_intrinsics = yes ; then
+ AC_DEFINE(USE_SSE2, 1, [use SSE2 compiler intrinsics])
+fi
+
+AC_MSG_RESULT($have_sse2_intrinsics)
+if test $enable_sse2 = yes && test $have_sse2_intrinsics = no ; then
+ AC_MSG_ERROR([SSE2 intrinsics not detected])
+fi
+
+AM_CONDITIONAL(USE_SSE2, test $have_sse2_intrinsics = yes)
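The USE_MMX / USE_SSE2 macros defined by the checks above only control whether the corresponding fast paths are compiled in; picking one at run time (via cpuid or getisax) happens separately. A self-contained sketch of the compile-time side, with illustrative names:

    #include <stdio.h>

    /* Reports which x86 fast paths this build was configured with. */
    static const char *
    example_compiled_in_fast_paths (void)
    {
    #if defined(USE_SSE2)
        return "sse2";
    #elif defined(USE_MMX)
        return "mmx";
    #else
        return "none";
    #endif
    }

    int
    main (void)
    {
        printf ("compiled-in x86 fast paths: %s\n", example_compiled_in_fast_paths ());
        return 0;
    }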
+
+dnl ===========================================================================
+dnl Other special flags needed when building code using MMX or SSE instructions
+case $host_os in
+ solaris*)
+ # When building 32-bit binaries, apply a mapfile to ensure that the
+ # binaries aren't flagged as only able to run on MMX+SSE capable CPUs
+ # since they check at runtime before using those instructions.
+ # Not all linkers grok the mapfile format so we check for that first.
+ if test "$AMD64_ABI" = "no" ; then
+ use_hwcap_mapfile=no
+ AC_MSG_CHECKING(whether to use a hardware capability map file)
+ hwcap_save_LDFLAGS="$LDFLAGS"
+ HWCAP_LDFLAGS='-Wl,-M,$(srcdir)/solaris-hwcap.mapfile'
+ LDFLAGS="$LDFLAGS -Wl,-M,pixman/solaris-hwcap.mapfile"
+ AC_LINK_IFELSE([int main() { return 0; }],
+ use_hwcap_mapfile=yes,
+ HWCAP_LDFLAGS="")
+ LDFLAGS="$hwcap_save_LDFLAGS"
+ AC_MSG_RESULT($use_hwcap_mapfile)
+ fi
+ if test "x$MMX_LDFLAGS" = "x" ; then
+ MMX_LDFLAGS="$HWCAP_LDFLAGS"
+ fi
+ if test "x$SSE2_LDFLAGS" = "x" ; then
+ SSE2_LDFLAGS="$HWCAP_LDFLAGS"
+ fi
+ ;;
+esac
+
+AC_SUBST(MMX_CFLAGS)
+AC_SUBST(MMX_LDFLAGS)
+AC_SUBST(SSE2_CFLAGS)
+AC_SUBST(SSE2_LDFLAGS)
+
+dnl ===========================================================================
+dnl Check for VMX/Altivec
+if test -n "`$CC -v 2>&1 | grep version | grep Apple`"; then
+ VMX_CFLAGS="-faltivec"
+else
+ VMX_CFLAGS="-maltivec -mabi=altivec"
+fi
+
+have_vmx_intrinsics=no
+AC_MSG_CHECKING(whether to use VMX/Altivec intrinsics)
+xserver_save_CFLAGS=$CFLAGS
+CFLAGS="$VMX_CFLAGS $CFLAGS"
+AC_COMPILE_IFELSE([
+#if defined(__GNUC__) && (__GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4))
+error "Need GCC >= 3.4 for sane altivec support"
+#endif
+#include <altivec.h>
+int main () {
+ vector unsigned int v = vec_splat_u32 (1);
+ v = vec_sub (v, v);
+ return 0;
+}], have_vmx_intrinsics=yes)
+CFLAGS=$xserver_save_CFLAGS
+
+AC_ARG_ENABLE(vmx,
+ [AC_HELP_STRING([--disable-vmx],
+ [disable VMX fast paths])],
+ [enable_vmx=$enableval], [enable_vmx=auto])
+
+if test $enable_vmx = no ; then
+ have_vmx_intrinsics=disabled
+fi
+
+if test $have_vmx_intrinsics = yes ; then
+ AC_DEFINE(USE_VMX, 1, [use VMX compiler intrinsics])
+else
+ VMX_CFLAGS=
+fi
+
+AC_MSG_RESULT($have_vmx_intrinsics)
+if test $enable_vmx = yes && test $have_vmx_intrinsics = no ; then
+ AC_MSG_ERROR([VMX intrinsics not detected])
+fi
+
+AC_SUBST(VMX_CFLAGS)
+
+AM_CONDITIONAL(USE_VMX, test $have_vmx_intrinsics = yes)
+
+dnl ==========================================================================
+dnl Check if assembler is gas compatible and supports ARM SIMD instructions
+have_arm_simd=no
+AC_MSG_CHECKING(whether to use ARM SIMD assembler)
+xserver_save_CFLAGS=$CFLAGS
+CFLAGS="-x assembler-with-cpp $CFLAGS"
+AC_COMPILE_IFELSE([[
+.text
+.arch armv6
+.object_arch armv4
+.arm
+.altmacro
+#ifndef __ARM_EABI__
+#error EABI is required (to be sure that calling conventions are compatible)
+#endif
+pld [r0]
+uqadd8 r0, r0, r0]], have_arm_simd=yes)
+CFLAGS=$xserver_save_CFLAGS
+
+AC_ARG_ENABLE(arm-simd,
+ [AC_HELP_STRING([--disable-arm-simd],
+ [disable ARM SIMD fast paths])],
+ [enable_arm_simd=$enableval], [enable_arm_simd=auto])
+
+if test $enable_arm_simd = no ; then
+ have_arm_simd=disabled
+fi
+
+if test $have_arm_simd = yes ; then
+ AC_DEFINE(USE_ARM_SIMD, 1, [use ARM SIMD assembly optimizations])
+fi
+
+AM_CONDITIONAL(USE_ARM_SIMD, test $have_arm_simd = yes)
+
+AC_MSG_RESULT($have_arm_simd)
+if test $enable_arm_simd = yes && test $have_arm_simd = no ; then
+ AC_MSG_ERROR([ARM SIMD intrinsics not detected])
+fi
+
+dnl ==========================================================================
+dnl Check if assembler is gas compatible and supports NEON instructions
+have_arm_neon=no
+AC_MSG_CHECKING(whether to use ARM NEON assembler)
+xserver_save_CFLAGS=$CFLAGS
+CFLAGS="-x assembler-with-cpp $CFLAGS"
+AC_COMPILE_IFELSE([[
+.text
+.fpu neon
+.arch armv7a
+.object_arch armv4
+.eabi_attribute 10, 0
+.arm
+.altmacro
+#ifndef __ARM_EABI__
+#error EABI is required (to be sure that calling conventions are compatible)
+#endif
+pld [r0]
+vmovn.u16 d0, q0]], have_arm_neon=yes)
+CFLAGS=$xserver_save_CFLAGS
+
+AC_ARG_ENABLE(arm-neon,
+ [AC_HELP_STRING([--disable-arm-neon],
+ [disable ARM NEON fast paths])],
+ [enable_arm_neon=$enableval], [enable_arm_neon=auto])
+
+if test $enable_arm_neon = no ; then
+ have_arm_neon=disabled
+fi
+
+if test $have_arm_neon = yes ; then
+ AC_DEFINE(USE_ARM_NEON, 1, [use ARM NEON assembly optimizations])
+fi
+
+AM_CONDITIONAL(USE_ARM_NEON, test $have_arm_neon = yes)
+
+AC_MSG_RESULT($have_arm_neon)
+if test $enable_arm_neon = yes && test $have_arm_neon = no ; then
+ AC_MSG_ERROR([ARM NEON intrinsics not detected])
+fi
+
+dnl =========================================================================================
+dnl Check for GNU-style inline assembly support
+
+have_gcc_inline_asm=no
+AC_MSG_CHECKING(whether to use GNU-style inline assembler)
+AC_COMPILE_IFELSE([
+int main () {
+ /* Most modern architectures have a NOP instruction, so this is a fairly generic test. */
+ asm volatile ( "\tnop\n" : : : "cc", "memory" );
+ return 0;
+}], have_gcc_inline_asm=yes)
+
+AC_ARG_ENABLE(gcc-inline-asm,
+ [AC_HELP_STRING([--disable-gcc-inline-asm],
+ [disable GNU-style inline assembler])],
+ [enable_gcc_inline_asm=$enableval], [enable_gcc_inline_asm=auto])
+
+if test $enable_gcc_inline_asm = no ; then
+ have_gcc_inline_asm=disabled
+fi
+
+if test $have_gcc_inline_asm = yes ; then
+ AC_DEFINE(USE_GCC_INLINE_ASM, 1, [use GNU-style inline assembler])
+fi
+
+AC_MSG_RESULT($have_gcc_inline_asm)
+if test $enable_gcc_inline_asm = yes && test $have_gcc_inline_asm = no ; then
+ AC_MSG_ERROR([GNU-style inline assembler not detected])
+fi
+
+AM_CONDITIONAL(USE_GCC_INLINE_ASM, test $have_gcc_inline_asm = yes)
+
+dnl ==============================================
+dnl Static test programs
+
+AC_ARG_ENABLE(static-testprogs,
+ [AC_HELP_STRING([--enable-static-testprogs],
+ [build test programs as static binaries [default=no]])],
+ [enable_static_testprogs=$enableval], [enable_static_testprogs=no])
+
+TESTPROGS_EXTRA_LDFLAGS=
+if test "x$enable_static_testprogs" = "xyes" ; then
+ TESTPROGS_EXTRA_LDFLAGS="-all-static"
+fi
+AC_SUBST(TESTPROGS_EXTRA_LDFLAGS)
+
+dnl ==============================================
+dnl Timers
+
+AC_ARG_ENABLE(timers,
+ [AC_HELP_STRING([--enable-timers],
+ [enable TIMER_BEGIN and TIMER_END macros [default=no]])],
+ [enable_timers=$enableval], [enable_timers=no])
+
+if test $enable_timers = yes ; then
+ AC_DEFINE(PIXMAN_TIMERS, 1, [enable TIMER_BEGIN/TIMER_END macros])
+fi
+AC_SUBST(PIXMAN_TIMERS)
+
+dnl ===================================
+dnl GTK+
+
+AC_ARG_ENABLE(gtk,
+ [AC_HELP_STRING([--enable-gtk],
+ [enable tests using GTK+ [default=auto]])],
+ [enable_gtk=$enableval], [enable_gtk=auto])
+
+PKG_PROG_PKG_CONFIG
+
+if test $enable_gtk = yes ; then
+ AC_CHECK_LIB([pixman-1], [pixman_version_string])
+ PKG_CHECK_MODULES(GTK, [gtk+-2.0 pixman-1])
+fi
+
+if test $enable_gtk = auto ; then
+ AC_CHECK_LIB([pixman-1], [pixman_version_string], [enable_gtk=auto], [enable_gtk=no])
+fi
+
+if test $enable_gtk = auto ; then
+ PKG_CHECK_MODULES(GTK, [gtk+-2.0 pixman-1], [enable_gtk=yes], [enable_gtk=no])
+fi
+
+AM_CONDITIONAL(HAVE_GTK, [test "x$enable_gtk" = xyes])
+
+AC_SUBST(GTK_CFLAGS)
+AC_SUBST(GTK_LIBS)
+AC_SUBST(DEP_CFLAGS)
+AC_SUBST(DEP_LIBS)
+
+dnl =====================================
+dnl posix_memalign, sigaction, alarm, gettimeofday
+
+AC_CHECK_FUNC(posix_memalign, have_posix_memalign=yes, have_posix_memalign=no)
+if test x$have_posix_memalign = xyes; then
+ AC_DEFINE(HAVE_POSIX_MEMALIGN, 1, [Whether we have posix_memalign()])
+fi
+
+AC_CHECK_FUNC(sigaction, have_sigaction=yes, have_sigaction=no)
+if test x$have_sigaction = xyes; then
+ AC_DEFINE(HAVE_SIGACTION, 1, [Whether we have sigaction()])
+fi
+
+AC_CHECK_FUNC(alarm, have_alarm=yes, have_alarm=no)
+if test x$have_alarm = xyes; then
+ AC_DEFINE(HAVE_ALARM, 1, [Whether we have alarm()])
+fi
+
+AC_CHECK_HEADER([sys/mman.h],
+ [AC_DEFINE(HAVE_SYS_MMAN_H, [1], [Define to 1 if we have <sys/mman.h>])])
+
+AC_CHECK_FUNC(mprotect, have_mprotect=yes, have_mprotect=no)
+if test x$have_mprotect = xyes; then
+ AC_DEFINE(HAVE_MPROTECT, 1, [Whether we have mprotect()])
+fi
+
+AC_CHECK_FUNC(getpagesize, have_getpagesize=yes, have_getpagesize=no)
+if test x$have_getpagesize = xyes; then
+ AC_DEFINE(HAVE_GETPAGESIZE, 1, [Whether we have getpagesize()])
+fi
+
+AC_CHECK_HEADER([fenv.h],
+ [AC_DEFINE(HAVE_FENV_H, [1], [Define to 1 if we have <fenv.h>])])
+
+AC_CHECK_LIB(m, feenableexcept, have_feenableexcept=yes, have_feenableexcept=no)
+if test x$have_feenableexcept = xyes; then
+ AC_DEFINE(HAVE_FEENABLEEXCEPT, 1, [Whether we have feenableexcept()])
+fi
+
+AC_CHECK_FUNC(gettimeofday, have_gettimeofday=yes, have_gettimeofday=no)
+AC_CHECK_HEADER(sys/time.h, have_sys_time_h=yes, have_sys_time_h=no)
+if test x$have_gettimeofday = xyes && test x$have_sys_time_h = xyes; then
+ AC_DEFINE(HAVE_GETTIMEOFDAY, 1, [Whether we have gettimeofday()])
+fi
+
+dnl =====================================
+dnl Thread local storage
+
+support_for__thread=no
+
+AC_MSG_CHECKING(for __thread)
+AC_LINK_IFELSE([
+#if defined(__MINGW32__) && !(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+#error This MinGW version has broken __thread support
+#endif
+#ifdef __OpenBSD__
+#error OpenBSD has broken __thread support
+#endif
+static __thread int x ;
+int main () { x = 123; return x; }
+], support_for__thread=yes)
+
+if test $support_for__thread = yes; then
+ AC_DEFINE([TOOLCHAIN_SUPPORTS__THREAD],[],[Whether the tool chain supports __thread])
+fi
+
+AC_MSG_RESULT($support_for__thread)
+
+dnl
+dnl posix tls
+dnl
+
+m4_define([pthread_test_program],[dnl
+#include <stdlib.h>
+#include <pthread.h>
+
+static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+static pthread_key_t key;
+
+static void
+make_key (void)
+{
+ pthread_key_create (&key, NULL);
+}
+
+int
+main ()
+{
+ void *value = NULL;
+
+ if (pthread_once (&once_control, make_key) != 0)
+ {
+ value = NULL;
+ }
+ else
+ {
+ value = pthread_getspecific (key);
+ if (!value)
+ {
+ value = malloc (100);
+ pthread_setspecific (key, value);
+ }
+ }
+ return 0;
+}
+])
+
+AC_DEFUN([PIXMAN_CHECK_PTHREAD],[dnl
+ if test "z$support_for_pthread_setspecific" != "zyes"; then
+ PIXMAN_LINK_WITH_ENV(
+ [$1], [pthread_test_program],
+ [PTHREAD_CFLAGS="$CFLAGS"
+ PTHREAD_LIBS="$LIBS"
+ PTHREAD_LDFLAGS="$LDFLAGS"
+ support_for_pthread_setspecific=yes])
+ fi
+])
+
+if test $support_for__thread = no; then
+ support_for_pthread_setspecific=no
+
+ AC_MSG_CHECKING(for pthread_setspecific)
+
+ PIXMAN_CHECK_PTHREAD([CFLAGS="-D_REENTRANT"; LIBS="-lpthread"])
+ PIXMAN_CHECK_PTHREAD([CFLAGS="-pthread"; LDFLAGS="-pthread"])
+ PIXMAN_CHECK_PTHREAD([CFLAGS="-D_REENTRANT"; LDFLAGS="-lroot"])
+
+ if test $support_for_pthread_setspecific = yes; then
+ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+ AC_DEFINE([HAVE_PTHREAD_SETSPECIFIC], [], [Whether pthread_setspecific() is supported])
+ fi
+
+ AC_MSG_RESULT($support_for_pthread_setspecific);
+fi
+
+AC_SUBST(TOOLCHAIN_SUPPORTS__THREAD)
+AC_SUBST(HAVE_PTHREAD_SETSPECIFIC)
+AC_SUBST(PTHREAD_LDFLAGS)
+AC_SUBST(PTHREAD_LIBS)
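A hedged sketch (the wrapper names are assumptions, not taken from this patch) of how code would select between the two thread-local storage mechanisms probed above:

    #include <pthread.h>

    #if defined(TOOLCHAIN_SUPPORTS__THREAD)

    /* Compiler-provided TLS: just declare the variable __thread. */
    static __thread void *example_tls;
    #define EXAMPLE_TLS_GET()  (example_tls)
    #define EXAMPLE_TLS_SET(v) (example_tls = (v))

    #elif defined(HAVE_PTHREAD_SETSPECIFIC)

    /* Fallback: one pthread key, created once, accessed through helpers. */
    static pthread_once_t example_once = PTHREAD_ONCE_INIT;
    static pthread_key_t  example_key;

    static void example_make_key (void) { pthread_key_create (&example_key, NULL); }

    static void *example_tls_get (void)
    {
        pthread_once (&example_once, example_make_key);
        return pthread_getspecific (example_key);
    }

    static void example_tls_set (void *v)
    {
        pthread_once (&example_once, example_make_key);
        pthread_setspecific (example_key, v);
    }

    #define EXAMPLE_TLS_GET()  (example_tls_get ())
    #define EXAMPLE_TLS_SET(v) (example_tls_set (v))

    #endif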
+
+dnl =====================================
+dnl __attribute__((constructor))
+
+support_for_attribute_constructor=no
+
+AC_MSG_CHECKING(for __attribute__((constructor)))
+AC_LINK_IFELSE([
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 7))
+/* attribute 'constructor' is supported since gcc 2.7, but some compilers
+ * may only pretend to be gcc, so let's try to actually use it
+ */
+static int x = 1;
+static void __attribute__((constructor)) constructor_function () { x = 0; }
+int main (void) { return x; }
+#else
+#error not gcc or gcc version is older than 2.7
+#endif
+], support_for_attribute_constructor=yes)
+
+if test x$support_for_attribute_constructor = xyes; then
+ AC_DEFINE([TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR],
+ [],[Whether the tool chain supports __attribute__((constructor))])
+fi
+
+AC_MSG_RESULT($support_for_attribute_constructor)
+AC_SUBST(TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR)
+
+AC_OUTPUT([pixman-1.pc
+ pixman-1-uninstalled.pc
+ Makefile
+ pixman/Makefile
+ pixman/pixman-version.h
+ demos/Makefile
+ test/Makefile])
+
+m4_if(m4_eval(pixman_minor % 2), [1], [
+ echo
+ echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
+ echo
+ echo " Thanks for testing this development snapshot of pixman. Please"
+ echo " report any problems you find, either by sending email to "
+ echo
+ echo " pixman@lists.freedesktop.org"
+ echo
+ echo " or by filing a bug at "
+ echo
+ echo " https://bugs.freedesktop.org/enter_bug.cgi?product=pixman "
+ echo
+ echo " If you are looking for a stable release of pixman, please note "
+ echo " that stable releases have _even_ minor version numbers. Ie., "
+ echo " pixman-0.]m4_eval(pixman_minor & ~1)[.x are stable releases, whereas pixman-$PIXMAN_VERSION_MAJOR.$PIXMAN_VERSION_MINOR.$PIXMAN_VERSION_MICRO is a "
+ echo " development snapshot that may contain bugs and experimental "
+ echo " features. "
+ echo
+ echo "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
+ echo
+])
diff --git a/pixman/demos/Makefile.am b/pixman/demos/Makefile.am
new file mode 100644
index 000000000..2dcdfd350
--- /dev/null
+++ b/pixman/demos/Makefile.am
@@ -0,0 +1,34 @@
+if HAVE_GTK
+
+AM_CFLAGS = @OPENMP_CFLAGS@
+AM_LDFLAGS = @OPENMP_CFLAGS@
+
+LDADD = $(GTK_LIBS) $(top_builddir)/pixman/libpixman-1.la -lm
+INCLUDES = -I$(top_srcdir)/pixman -I$(top_builddir)/pixman $(GTK_CFLAGS)
+
+GTK_UTILS = gtk-utils.c gtk-utils.h
+
+DEMOS = \
+ clip-test \
+ clip-in \
+ composite-test \
+ gradient-test \
+ radial-test \
+ alpha-test \
+ screen-test \
+ convolution-test \
+ trap-test
+
+gradient_test_SOURCES = gradient-test.c $(GTK_UTILS)
+alpha_test_SOURCES = alpha-test.c $(GTK_UTILS)
+composite_test_SOURCES = composite-test.c $(GTK_UTILS)
+clip_test_SOURCES = clip-test.c $(GTK_UTILS)
+clip_in_SOURCES = clip-in.c $(GTK_UTILS)
+trap_test_SOURCES = trap-test.c $(GTK_UTILS)
+screen_test_SOURCES = screen-test.c $(GTK_UTILS)
+convolution_test_SOURCES = convolution-test.c $(GTK_UTILS)
+radial_test_SOURCES = radial-test.c ../test/utils.c ../test/utils.h $(GTK_UTILS)
+
+noinst_PROGRAMS = $(DEMOS)
+
+endif
diff --git a/pixman/test/alpha-test.c b/pixman/demos/alpha-test.c
index 92c208142..92c208142 100644
--- a/pixman/test/alpha-test.c
+++ b/pixman/demos/alpha-test.c
diff --git a/pixman/test/clip-in.c b/pixman/demos/clip-in.c
index 51579811f..51579811f 100644
--- a/pixman/test/clip-in.c
+++ b/pixman/demos/clip-in.c
diff --git a/pixman/test/clip-test.c b/pixman/demos/clip-test.c
index aa0df4482..aa0df4482 100644
--- a/pixman/test/clip-test.c
+++ b/pixman/demos/clip-test.c
diff --git a/pixman/test/composite-test.c b/pixman/demos/composite-test.c
index 79d5d5eac..79d5d5eac 100644
--- a/pixman/test/composite-test.c
+++ b/pixman/demos/composite-test.c
diff --git a/pixman/test/convolution-test.c b/pixman/demos/convolution-test.c
index da284af7b..da284af7b 100644
--- a/pixman/test/convolution-test.c
+++ b/pixman/demos/convolution-test.c
diff --git a/pixman/test/gradient-test.c b/pixman/demos/gradient-test.c
index fc84844b0..fc84844b0 100644
--- a/pixman/test/gradient-test.c
+++ b/pixman/demos/gradient-test.c
diff --git a/pixman/test/gtk-utils.c b/pixman/demos/gtk-utils.c
index f45cdc912..f45cdc912 100644
--- a/pixman/test/gtk-utils.c
+++ b/pixman/demos/gtk-utils.c
diff --git a/pixman/test/gtk-utils.h b/pixman/demos/gtk-utils.h
index 2cb13bcf0..2cb13bcf0 100644
--- a/pixman/test/gtk-utils.h
+++ b/pixman/demos/gtk-utils.h
diff --git a/pixman/test/radial-test.c b/pixman/demos/radial-test.c
index 5d716c339..35e90d786 100644
--- a/pixman/test/radial-test.c
+++ b/pixman/demos/radial-test.c
@@ -1,4 +1,4 @@
-#include "utils.h"
+#include "../test/utils.h"
 #include "gtk-utils.h"
 
 #define NUM_GRADIENTS 7
diff --git a/pixman/test/screen-test.c b/pixman/demos/screen-test.c
index e69dba3de..e69dba3de 100644
--- a/pixman/test/screen-test.c
+++ b/pixman/demos/screen-test.c
diff --git a/pixman/test/trap-test.c b/pixman/demos/trap-test.c
index 19295e7a5..19295e7a5 100644
--- a/pixman/test/trap-test.c
+++ b/pixman/demos/trap-test.c
diff --git a/pixman/pixman/pixman-arm-common.h b/pixman/pixman/pixman-arm-common.h
index 372e9f9a8..7420d9305 100644
--- a/pixman/pixman/pixman-arm-common.h
+++ b/pixman/pixman/pixman-arm-common.h
@@ -1,319 +1,364 @@
[The previous pixman-arm-common.h contents are removed here; the replacement version follows in full as the "+" lines below.]
+/*
+ * Copyright © 2010 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+#ifndef PIXMAN_ARM_COMMON_H
+#define PIXMAN_ARM_COMMON_H
+
+#include "pixman-fast-path.h"
+
+/* Define some macros which can expand into proxy functions between
+ * ARM assembly optimized functions and the rest of pixman fast path API.
+ *
+ * All the low level ARM assembly functions have to use ARM EABI
+ * calling convention and take up to 8 arguments:
+ * width, height, dst, dst_stride, src, src_stride, mask, mask_stride
+ *
+ * The arguments are ordered with the most important coming first (the
+ * first 4 arguments are passed to function in registers, the rest are
+ * on stack). The last arguments are optional, for example if the
+ * function is not using mask, then 'mask' and 'mask_stride' can be
+ * omitted when doing a function call.
+ *
+ * Arguments 'src' and 'mask' contain either a pointer to the top left
+ * pixel of the composited rectangle or a pixel color value depending
+ * on the function type. In the case of just a color value (solid source
+ * or mask), the corresponding stride argument is unused.
+ */
+
+#define SKIP_ZERO_SRC 1
+#define SKIP_ZERO_MASK 2
+
+#define PIXMAN_ARM_BIND_FAST_PATH_SRC_DST(cputype, name, \
+ src_type, src_cnt, \
+ dst_type, dst_cnt) \
+void \
+pixman_composite_##name##_asm_##cputype (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ src_type *src, \
+ int32_t src_stride); \
+ \
+static void \
+cputype##_composite_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ src_type *src_line; \
+ int32_t dst_stride, src_stride; \
+ \
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
+ src_stride, src_line, src_cnt); \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ \
+ pixman_composite_##name##_asm_##cputype (width, height, \
+ dst_line, dst_stride, \
+ src_line, src_stride); \
+}
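For illustration (the instantiation below is an assumption, not part of this patch), an ARM backend source file uses the macro above once per fast path; the invocation both declares the assembly routine and generates its C wrapper:

    /* Declares pixman_composite_src_8888_8888_asm_neon (w, h, dst, dst_stride,
     * src, src_stride) and emits the proxy neon_composite_src_8888_8888(),
     * which fetches the scanline pointers and strides and forwards them. */
    PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_8888,
                                       uint32_t, 1, uint32_t, 1)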
+
+#define PIXMAN_ARM_BIND_FAST_PATH_N_DST(flags, cputype, name, \
+ dst_type, dst_cnt) \
+void \
+pixman_composite_##name##_asm_##cputype (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ uint32_t src); \
+ \
+static void \
+cputype##_composite_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ int32_t dst_stride; \
+ uint32_t src; \
+ \
+ src = _pixman_image_get_solid ( \
+ imp, src_image, dst_image->bits.format); \
+ \
+ if ((flags & SKIP_ZERO_SRC) && src == 0) \
+ return; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ \
+ pixman_composite_##name##_asm_##cputype (width, height, \
+ dst_line, dst_stride, \
+ src); \
+}
+
+#define PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST(flags, cputype, name, \
+ mask_type, mask_cnt, \
+ dst_type, dst_cnt) \
+void \
+pixman_composite_##name##_asm_##cputype (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ uint32_t src, \
+ int32_t unused, \
+ mask_type *mask, \
+ int32_t mask_stride); \
+ \
+static void \
+cputype##_composite_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ mask_type *mask_line; \
+ int32_t dst_stride, mask_stride; \
+ uint32_t src; \
+ \
+ src = _pixman_image_get_solid ( \
+ imp, src_image, dst_image->bits.format); \
+ \
+ if ((flags & SKIP_ZERO_SRC) && src == 0) \
+ return; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \
+ mask_stride, mask_line, mask_cnt); \
+ \
+ pixman_composite_##name##_asm_##cputype (width, height, \
+ dst_line, dst_stride, \
+ src, 0, \
+ mask_line, mask_stride); \
+}
+
+#define PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST(flags, cputype, name, \
+ src_type, src_cnt, \
+ dst_type, dst_cnt) \
+void \
+pixman_composite_##name##_asm_##cputype (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ src_type *src, \
+ int32_t src_stride, \
+ uint32_t mask); \
+ \
+static void \
+cputype##_composite_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ src_type *src_line; \
+ int32_t dst_stride, src_stride; \
+ uint32_t mask; \
+ \
+ mask = _pixman_image_get_solid ( \
+ imp, mask_image, dst_image->bits.format); \
+ \
+ if ((flags & SKIP_ZERO_MASK) && mask == 0) \
+ return; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
+ src_stride, src_line, src_cnt); \
+ \
+ pixman_composite_##name##_asm_##cputype (width, height, \
+ dst_line, dst_stride, \
+ src_line, src_stride, \
+ mask); \
+}
+
+#define PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST(cputype, name, \
+ src_type, src_cnt, \
+ mask_type, mask_cnt, \
+ dst_type, dst_cnt) \
+void \
+pixman_composite_##name##_asm_##cputype (int32_t w, \
+ int32_t h, \
+ dst_type *dst, \
+ int32_t dst_stride, \
+ src_type *src, \
+ int32_t src_stride, \
+ mask_type *mask, \
+ int32_t mask_stride); \
+ \
+static void \
+cputype##_composite_##name (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ pixman_image_t * src_image, \
+ pixman_image_t * mask_image, \
+ pixman_image_t * dst_image, \
+ int32_t src_x, \
+ int32_t src_y, \
+ int32_t mask_x, \
+ int32_t mask_y, \
+ int32_t dest_x, \
+ int32_t dest_y, \
+ int32_t width, \
+ int32_t height) \
+{ \
+ dst_type *dst_line; \
+ src_type *src_line; \
+ mask_type *mask_line; \
+ int32_t dst_stride, src_stride, mask_stride; \
+ \
+ PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type, \
+ dst_stride, dst_line, dst_cnt); \
+ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
+ src_stride, src_line, src_cnt); \
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \
+ mask_stride, mask_line, mask_cnt); \
+ \
+ pixman_composite_##name##_asm_##cputype (width, height, \
+ dst_line, dst_stride, \
+ src_line, src_stride, \
+ mask_line, mask_stride); \
+}
+
+#define PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST(cputype, name, op, \
+ src_type, dst_type) \
+void \
+pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype ( \
+ int32_t w, \
+ dst_type * dst, \
+ const src_type * src, \
+ pixman_fixed_t vx, \
+ pixman_fixed_t unit_x); \
+ \
+static force_inline void \
+scaled_nearest_scanline_##cputype##_##name##_##op (dst_type * pd, \
+ const src_type * ps, \
+ int32_t w, \
+ pixman_fixed_t vx, \
+ pixman_fixed_t unit_x, \
+ pixman_fixed_t max_vx, \
+ pixman_bool_t zero_src) \
+{ \
+ pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype (w, pd, ps, \
+ vx, unit_x);\
+} \
+ \
+FAST_NEAREST_MAINLOOP (cputype##_##name##_cover_##op, \
+ scaled_nearest_scanline_##cputype##_##name##_##op, \
+ src_type, dst_type, COVER) \
+FAST_NEAREST_MAINLOOP (cputype##_##name##_none_##op, \
+ scaled_nearest_scanline_##cputype##_##name##_##op, \
+ src_type, dst_type, NONE) \
+FAST_NEAREST_MAINLOOP (cputype##_##name##_pad_##op, \
+ scaled_nearest_scanline_##cputype##_##name##_##op, \
+ src_type, dst_type, PAD)
+
+/* Provide entries for the fast path table */
+#define PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \
+ SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func), \
+ SIMPLE_NEAREST_FAST_PATH_NONE (op,s,d,func), \
+ SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func)
+
+#define PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST(flags, cputype, name, op, \
+ src_type, dst_type) \
+void \
+pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype ( \
+ int32_t w, \
+ dst_type * dst, \
+ const src_type * src, \
+ pixman_fixed_t vx, \
+ pixman_fixed_t unit_x, \
+ const uint8_t * mask); \
+ \
+static force_inline void \
+scaled_nearest_scanline_##cputype##_##name##_##op (const uint8_t * mask, \
+ dst_type * pd, \
+ const src_type * ps, \
+ int32_t w, \
+ pixman_fixed_t vx, \
+ pixman_fixed_t unit_x, \
+ pixman_fixed_t max_vx, \
+ pixman_bool_t zero_src) \
+{ \
+ if ((flags & SKIP_ZERO_SRC) && zero_src) \
+ return; \
+ pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype (w, pd, ps, \
+ vx, unit_x, \
+ mask); \
+} \
+ \
+FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_cover_##op, \
+ scaled_nearest_scanline_##cputype##_##name##_##op,\
+ src_type, uint8_t, dst_type, COVER, TRUE, FALSE)\
+FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_none_##op, \
+ scaled_nearest_scanline_##cputype##_##name##_##op,\
+ src_type, uint8_t, dst_type, NONE, TRUE, FALSE) \
+FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_pad_##op, \
+ scaled_nearest_scanline_##cputype##_##name##_##op,\
+ src_type, uint8_t, dst_type, PAD, TRUE, FALSE)
+
+/* Provide entries for the fast path table */
+#define PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH(op,s,d,func) \
+ SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER (op,s,d,func), \
+ SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE (op,s,d,func), \
+ SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD (op,s,d,func)
+
+#endif
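For orientation, the following hand expansion shows roughly what one of the binders above generates; it illustrates PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_0565, uint16_t, 1), which is instantiated in pixman-arm-neon.c below. The expansion is a reading aid only and is not part of the patch.

/* Illustrative expansion (not in the patch): what the macro emits for
 * PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_0565, uint16_t, 1)
 */
void
pixman_composite_over_n_0565_asm_neon (int32_t w,
                                       int32_t h,
                                       uint16_t *dst,
                                       int32_t dst_stride,
                                       uint32_t src);

static void
neon_composite_over_n_0565 (pixman_implementation_t *imp,
                            pixman_op_t op,
                            pixman_image_t * src_image,
                            pixman_image_t * mask_image,
                            pixman_image_t * dst_image,
                            int32_t src_x,
                            int32_t src_y,
                            int32_t mask_x,
                            int32_t mask_y,
                            int32_t dest_x,
                            int32_t dest_y,
                            int32_t width,
                            int32_t height)
{
    uint16_t *dst_line;
    int32_t dst_stride;
    uint32_t src;

    /* Reduce the solid source image to a single colour value in the
     * destination format. */
    src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);

    /* flags is SKIP_ZERO_SRC here, so the literal expansion reads
     * ((SKIP_ZERO_SRC & SKIP_ZERO_SRC) && src == 0): OVER with a fully
     * transparent solid source is a no-op. */
    if (src == 0)
        return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
                           dst_stride, dst_line, 1);

    /* Hand the whole rectangle to the NEON assembly routine. */
    pixman_composite_over_n_0565_asm_neon (width, height,
                                           dst_line, dst_stride,
                                           src);
}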
diff --git a/pixman/pixman/pixman-arm-neon-asm.S b/pixman/pixman/pixman-arm-neon-asm.S
index 51533d42f..617217beb 100644
--- a/pixman/pixman/pixman-arm-neon-asm.S
+++ b/pixman/pixman/pixman-arm-neon-asm.S
@@ -2363,3 +2363,31 @@ generate_composite_function_nearest_scanline \
 pixman_composite_src_0565_8888_process_pixblock_head, \
pixman_composite_src_0565_8888_process_pixblock_tail, \
pixman_composite_src_0565_8888_process_pixblock_tail_head
+
+generate_composite_function_nearest_scanline \
+ pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \
+ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+ 8, /* number of pixels, processed in a single block */ \
+ default_init_need_all_regs, \
+ default_cleanup_need_all_regs, \
+ pixman_composite_over_8888_8_0565_process_pixblock_head, \
+ pixman_composite_over_8888_8_0565_process_pixblock_tail, \
+ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
+ 28, /* dst_w_basereg */ \
+ 4, /* dst_r_basereg */ \
+ 8, /* src_basereg */ \
+ 24 /* mask_basereg */
+
+generate_composite_function_nearest_scanline \
+ pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \
+ FLAG_DST_READWRITE, \
+ 8, /* number of pixels, processed in a single block */ \
+ default_init_need_all_regs, \
+ default_cleanup_need_all_regs, \
+ pixman_composite_over_0565_8_0565_process_pixblock_head, \
+ pixman_composite_over_0565_8_0565_process_pixblock_tail, \
+ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
+ 28, /* dst_w_basereg */ \
+ 10, /* dst_r_basereg */ \
+ 8, /* src_basereg */ \
+ 15 /* mask_basereg */
diff --git a/pixman/pixman/pixman-arm-neon.c b/pixman/pixman/pixman-arm-neon.c
index 7d6c83775..24a8b0886 100644
--- a/pixman/pixman/pixman-arm-neon.c
+++ b/pixman/pixman/pixman-arm-neon.c
@@ -1,430 +1,441 @@
+/*
+ * Copyright © 2009 ARM Ltd, Movial Creative Technologies Oy
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of ARM Ltd not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. ARM Ltd makes no
+ * representations about the suitability of this software for any purpose. It
+ * is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author: Ian Rickards (ian.rickards@arm.com)
+ * Author: Jonathan Morton (jonathan.morton@movial.com)
+ * Author: Markku Vire (markku.vire@movial.com)
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <string.h>
+#include "pixman-private.h"
+#include "pixman-arm-common.h"
+
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_x888_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0565_0565,
+ uint16_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_0888,
+ uint8_t, 3, uint8_t, 3)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_0565,
+ uint32_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0565_8888,
+ uint16_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_8888_rev,
+ uint8_t, 3, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_0565_rev,
+ uint8_t, 3, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_pixbuf_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_rpixbuf_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, add_8_8,
+ uint8_t, 1, uint8_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, add_8888_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, over_8888_0565,
+ uint32_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, over_8888_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, out_reverse_8_0565,
+ uint8_t, 1, uint16_t, 1)
+
+PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_0565,
+ uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_8888,
+ uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_reverse_n_8888,
+ uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_DST (0, neon, in_n_8,
+ uint8_t, 1)
+
+PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_0565,
+ uint8_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8888,
+ uint8_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_8888_ca,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8,
+ uint8_t, 1, uint8_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8,
+ uint8_t, 1, uint8_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8888,
+ uint8_t, 1, uint32_t, 1)
+
+PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_8888_n_8888,
+ uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_8888_n_0565,
+ uint32_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_0565_n_0565,
+ uint16_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, add_8888_n_8888,
+ uint32_t, 1, uint32_t, 1)
+
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8_8_8,
+ uint8_t, 1, uint8_t, 1, uint8_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_0565_8_0565,
+ uint16_t, 1, uint8_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8888_8_8888,
+ uint32_t, 1, uint8_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8888_8888_8888,
+ uint32_t, 1, uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8_8888,
+ uint32_t, 1, uint8_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8888_8888,
+ uint32_t, 1, uint32_t, 1, uint32_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8_0565,
+ uint32_t, 1, uint8_t, 1, uint16_t, 1)
+PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_0565_8_0565,
+ uint16_t, 1, uint8_t, 1, uint16_t, 1)
+
+PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_8888, OVER,
+ uint32_t, uint32_t)
+PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_0565, OVER,
+ uint32_t, uint16_t)
+PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_0565, SRC,
+ uint32_t, uint16_t)
+PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 0565_8888, SRC,
+ uint16_t, uint32_t)
+
+PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_0565,
+ OVER, uint32_t, uint16_t)
+PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, neon, 0565_8_0565,
+ OVER, uint16_t, uint16_t)
+
+void
+pixman_composite_src_n_8_asm_neon (int32_t w,
+ int32_t h,
+ uint8_t *dst,
+ int32_t dst_stride,
+ uint8_t src);
+
+void
+pixman_composite_src_n_0565_asm_neon (int32_t w,
+ int32_t h,
+ uint16_t *dst,
+ int32_t dst_stride,
+ uint16_t src);
+
+void
+pixman_composite_src_n_8888_asm_neon (int32_t w,
+ int32_t h,
+ uint32_t *dst,
+ int32_t dst_stride,
+ uint32_t src);
+
+static pixman_bool_t
+pixman_fill_neon (uint32_t *bits,
+ int stride,
+ int bpp,
+ int x,
+ int y,
+ int width,
+ int height,
+ uint32_t _xor)
+{
+ /* stride is always multiple of 32bit units in pixman */
+ uint32_t byte_stride = stride * sizeof(uint32_t);
+
+ switch (bpp)
+ {
+ case 8:
+ pixman_composite_src_n_8_asm_neon (
+ width,
+ height,
+ (uint8_t *)(((char *) bits) + y * byte_stride + x),
+ byte_stride,
+ _xor & 0xff);
+ return TRUE;
+ case 16:
+ pixman_composite_src_n_0565_asm_neon (
+ width,
+ height,
+ (uint16_t *)(((char *) bits) + y * byte_stride + x * 2),
+ byte_stride / 2,
+ _xor & 0xffff);
+ return TRUE;
+ case 32:
+ pixman_composite_src_n_8888_asm_neon (
+ width,
+ height,
+ (uint32_t *)(((char *) bits) + y * byte_stride + x * 4),
+ byte_stride / 4,
+ _xor);
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+static pixman_bool_t
+pixman_blt_neon (uint32_t *src_bits,
+ uint32_t *dst_bits,
+ int src_stride,
+ int dst_stride,
+ int src_bpp,
+ int dst_bpp,
+ int src_x,
+ int src_y,
+ int dst_x,
+ int dst_y,
+ int width,
+ int height)
+{
+ if (src_bpp != dst_bpp)
+ return FALSE;
+
+ switch (src_bpp)
+ {
+ case 16:
+ pixman_composite_src_0565_0565_asm_neon (
+ width, height,
+ (uint16_t *)(((char *) dst_bits) +
+ dst_y * dst_stride * 4 + dst_x * 2), dst_stride * 2,
+ (uint16_t *)(((char *) src_bits) +
+ src_y * src_stride * 4 + src_x * 2), src_stride * 2);
+ return TRUE;
+ case 32:
+ pixman_composite_src_8888_8888_asm_neon (
+ width, height,
+ (uint32_t *)(((char *) dst_bits) +
+ dst_y * dst_stride * 4 + dst_x * 4), dst_stride,
+ (uint32_t *)(((char *) src_bits) +
+ src_y * src_stride * 4 + src_x * 4), src_stride);
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+static const pixman_fast_path_t arm_neon_fast_paths[] =
+{
+ PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, neon_composite_src_0565_0565),
+ PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, neon_composite_src_0565_0565),
+ PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565),
+ PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565),
+ PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565),
+ PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565),
+ PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, neon_composite_src_0565_8888),
+ PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, neon_composite_src_0565_8888),
+ PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, neon_composite_src_0565_8888),
+ PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, neon_composite_src_0565_8888),
+ PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, neon_composite_src_8888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, neon_composite_src_8888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, neon_composite_src_x888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, neon_composite_src_x888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, neon_composite_src_0888_0888),
+ PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, x8r8g8b8, neon_composite_src_0888_8888_rev),
+ PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, r5g6b5, neon_composite_src_0888_0565_rev),
+ PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8r8g8b8, neon_composite_src_pixbuf_8888),
+ PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8b8g8r8, neon_composite_src_rpixbuf_8888),
+ PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8r8g8b8, neon_composite_src_rpixbuf_8888),
+ PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8b8g8r8, neon_composite_src_pixbuf_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8, neon_composite_over_n_8_8),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, neon_composite_over_n_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, neon_composite_over_n_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, neon_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, neon_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, neon_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, neon_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, neon_composite_over_n_0565),
+ PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, neon_composite_over_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, neon_composite_over_n_8888),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, neon_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, neon_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, neon_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, neon_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, neon_composite_over_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, neon_composite_over_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, r5g6b5, neon_composite_over_8888_n_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, b5g6r5, neon_composite_over_8888_n_0565),
+ PIXMAN_STD_FAST_PATH (OVER, r5g6b5, solid, r5g6b5, neon_composite_over_0565_n_0565),
+ PIXMAN_STD_FAST_PATH (OVER, b5g6r5, solid, b5g6r5, neon_composite_over_0565_n_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, neon_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, neon_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, neon_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, neon_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, r5g6b5, neon_composite_over_8888_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, b5g6r5, neon_composite_over_8888_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, r5g6b5, a8, r5g6b5, neon_composite_over_0565_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, b5g6r5, a8, b5g6r5, neon_composite_over_0565_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_over_8888_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, neon_composite_over_8888_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, neon_composite_over_8888_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, neon_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, neon_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, neon_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, neon_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, a8r8g8b8, neon_composite_src_x888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, a8b8g8r8, neon_composite_src_x888_8888),
+ PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, neon_composite_add_n_8_8),
+ PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8r8g8b8, neon_composite_add_n_8_8888),
+ PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8b8g8r8, neon_composite_add_n_8_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8, a8, a8, neon_composite_add_8_8_8),
+ PIXMAN_STD_FAST_PATH (ADD, r5g6b5, a8, r5g6b5, neon_composite_add_0565_8_0565),
+ PIXMAN_STD_FAST_PATH (ADD, b5g6r5, a8, b5g6r5, neon_composite_add_0565_8_0565),
+ PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8, a8r8g8b8, neon_composite_add_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, a8, a8b8g8r8, neon_composite_add_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_add_8888_8888_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, solid, a8r8g8b8, neon_composite_add_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, solid, a8b8g8r8, neon_composite_add_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, neon_composite_add_8_8),
+ PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, neon_composite_add_8888_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, neon_composite_add_8888_8888),
+ PIXMAN_STD_FAST_PATH (IN, solid, null, a8, neon_composite_in_n_8),
+ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, neon_composite_over_reverse_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, neon_composite_over_reverse_n_8888),
+ PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, r5g6b5, neon_composite_out_reverse_8_0565),
+ PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, b5g6r5, neon_composite_out_reverse_8_0565),
+
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, neon_8888_8888),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8888),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, neon_8888_8888),
+
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, neon_8888_0565),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, b5g6r5, neon_8888_0565),
+
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_0565),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_0565),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, b5g6r5, neon_8888_0565),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, b5g6r5, neon_8888_0565),
+
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (SRC, b5g6r5, x8b8g8r8, neon_0565_8888),
+ PIXMAN_ARM_SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_8888),
+ /* Note: NONE repeat is not supported yet */
+ SIMPLE_NEAREST_FAST_PATH_COVER (SRC, r5g6b5, a8r8g8b8, neon_0565_8888),
+ SIMPLE_NEAREST_FAST_PATH_COVER (SRC, b5g6r5, a8b8g8r8, neon_0565_8888),
+ SIMPLE_NEAREST_FAST_PATH_PAD (SRC, r5g6b5, a8r8g8b8, neon_0565_8888),
+ SIMPLE_NEAREST_FAST_PATH_PAD (SRC, b5g6r5, a8b8g8r8, neon_0565_8888),
+
+ PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8r8g8b8, r5g6b5, neon_8888_8_0565),
+ PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8b8g8r8, b5g6r5, neon_8888_8_0565),
+
+ PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, r5g6b5, r5g6b5, neon_0565_8_0565),
+ PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, b5g6r5, b5g6r5, neon_0565_8_0565),
+
+ { PIXMAN_OP_NONE },
+};
+
+static pixman_bool_t
+arm_neon_blt (pixman_implementation_t *imp,
+ uint32_t * src_bits,
+ uint32_t * dst_bits,
+ int src_stride,
+ int dst_stride,
+ int src_bpp,
+ int dst_bpp,
+ int src_x,
+ int src_y,
+ int dst_x,
+ int dst_y,
+ int width,
+ int height)
+{
+ if (!pixman_blt_neon (
+ src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+ src_x, src_y, dst_x, dst_y, width, height))
+
+ {
+ return _pixman_implementation_blt (
+ imp->delegate,
+ src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+ src_x, src_y, dst_x, dst_y, width, height);
+ }
+
+ return TRUE;
+}
+
+static pixman_bool_t
+arm_neon_fill (pixman_implementation_t *imp,
+ uint32_t * bits,
+ int stride,
+ int bpp,
+ int x,
+ int y,
+ int width,
+ int height,
+ uint32_t xor)
+{
+ if (pixman_fill_neon (bits, stride, bpp, x, y, width, height, xor))
+ return TRUE;
+
+ return _pixman_implementation_fill (
+ imp->delegate, bits, stride, bpp, x, y, width, height, xor);
+}
+
+#define BIND_COMBINE_U(name) \
+void \
+pixman_composite_scanline_##name##_mask_asm_neon (int32_t w, \
+ const uint32_t *dst, \
+ const uint32_t *src, \
+ const uint32_t *mask); \
+ \
+void \
+pixman_composite_scanline_##name##_asm_neon (int32_t w, \
+ const uint32_t *dst, \
+ const uint32_t *src); \
+ \
+static void \
+neon_combine_##name##_u (pixman_implementation_t *imp, \
+ pixman_op_t op, \
+ uint32_t * dest, \
+ const uint32_t * src, \
+ const uint32_t * mask, \
+ int width) \
+{ \
+ if (mask) \
+ pixman_composite_scanline_##name##_mask_asm_neon (width, dest, \
+ src, mask); \
+ else \
+ pixman_composite_scanline_##name##_asm_neon (width, dest, src); \
+}
+
+BIND_COMBINE_U (over)
+BIND_COMBINE_U (add)
+BIND_COMBINE_U (out_reverse)
+
+pixman_implementation_t *
+_pixman_implementation_create_arm_neon (pixman_implementation_t *fallback)
+{
+ pixman_implementation_t *imp =
+ _pixman_implementation_create (fallback, arm_neon_fast_paths);
+
+ imp->combine_32[PIXMAN_OP_OVER] = neon_combine_over_u;
+ imp->combine_32[PIXMAN_OP_ADD] = neon_combine_add_u;
+ imp->combine_32[PIXMAN_OP_OUT_REVERSE] = neon_combine_out_reverse_u;
+
+ imp->blt = arm_neon_blt;
+ imp->fill = arm_neon_fill;
+
+ return imp;
+}
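As a similar reading aid, BIND_COMBINE_U (over) above expands to approximately the code below; this hand expansion is illustrative only and not part of the patch.

/* Illustrative expansion (not in the patch) of BIND_COMBINE_U (over) */
void
pixman_composite_scanline_over_mask_asm_neon (int32_t w,
                                              const uint32_t *dst,
                                              const uint32_t *src,
                                              const uint32_t *mask);

void
pixman_composite_scanline_over_asm_neon (int32_t w,
                                         const uint32_t *dst,
                                         const uint32_t *src);

static void
neon_combine_over_u (pixman_implementation_t *imp,
                     pixman_op_t op,
                     uint32_t * dest,
                     const uint32_t * src,
                     const uint32_t * mask,
                     int width)
{
    /* Pick the masked or unmasked NEON scanline combiner; the result is
     * written back into dest. This is the function registered in
     * imp->combine_32[PIXMAN_OP_OVER] above. */
    if (mask)
        pixman_composite_scanline_over_mask_asm_neon (width, dest, src, mask);
    else
        pixman_composite_scanline_over_asm_neon (width, dest, src);
}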
diff --git a/pixman/pixman/pixman-fast-path.c b/pixman/pixman/pixman-fast-path.c
index 4cb8321aa..5c0965d83 100644
--- a/pixman/pixman/pixman-fast-path.c
+++ b/pixman/pixman/pixman-fast-path.c
@@ -1410,12 +1410,13 @@ FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL)
 /* Use more unrolling for src_0565_0565 because it is typically CPU bound */
static force_inline void
-scaled_nearest_scanline_565_565_SRC (uint16_t * dst,
- uint16_t * src,
- int32_t w,
- pixman_fixed_t vx,
- pixman_fixed_t unit_x,
- pixman_fixed_t max_vx)
+scaled_nearest_scanline_565_565_SRC (uint16_t * dst,
+ const uint16_t * src,
+ int32_t w,
+ pixman_fixed_t vx,
+ pixman_fixed_t unit_x,
+ pixman_fixed_t max_vx,
+ pixman_bool_t fully_transparent_src)
{
uint16_t tmp1, tmp2, tmp3, tmp4;
while ((w -= 4) >= 0)
diff --git a/pixman/pixman/pixman-fast-path.h b/pixman/pixman/pixman-fast-path.h
index bb7032d86..b5f524353 100644
--- a/pixman/pixman/pixman-fast-path.h
+++ b/pixman/pixman/pixman-fast-path.h
@@ -138,18 +138,22 @@ pad_repeat_get_scanline_bounds (int32_t source_image_width,
 #define FAST_NEAREST_SCANLINE(scanline_func_name, SRC_FORMAT, DST_FORMAT, \
src_type_t, dst_type_t, OP, repeat_mode) \
static force_inline void \
-scanline_func_name (dst_type_t *dst, \
- src_type_t *src, \
- int32_t w, \
- pixman_fixed_t vx, \
- pixman_fixed_t unit_x, \
- pixman_fixed_t max_vx) \
+scanline_func_name (dst_type_t *dst, \
+ const src_type_t *src, \
+ int32_t w, \
+ pixman_fixed_t vx, \
+ pixman_fixed_t unit_x, \
+ pixman_fixed_t max_vx, \
+ pixman_bool_t fully_transparent_src) \
{ \
uint32_t d; \
src_type_t s1, s2; \
uint8_t a1, a2; \
int x1, x2; \
\
+ if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER && fully_transparent_src) \
+ return; \
+ \
if (PIXMAN_OP_ ## OP != PIXMAN_OP_SRC && PIXMAN_OP_ ## OP != PIXMAN_OP_OVER) \
abort(); \
\
@@ -245,8 +249,8 @@ scanline_func_name (dst_type_t *dst, \
 } \
}
-#define FAST_NEAREST_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, dst_type_t, \
- repeat_mode) \
+#define FAST_NEAREST_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, mask_type_t, \
+ dst_type_t, repeat_mode, have_mask, mask_is_solid) \
static void \
fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp, \
pixman_op_t op, \
@@ -263,6 +267,7 @@ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp,
 int32_t height) \
{ \
dst_type_t *dst_line; \
+ mask_type_t *mask_line; \
src_type_t *src_first_line; \
int y; \
pixman_fixed_t max_vx = INT32_MAX; /* suppress uninitialized variable warning */ \
@@ -274,9 +279,19 @@ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp,
 \
src_type_t *src; \
dst_type_t *dst; \
- int src_stride, dst_stride; \
+ mask_type_t solid_mask; \
+ const mask_type_t *mask = &solid_mask; \
+ int src_stride, mask_stride, dst_stride; \
\
PIXMAN_IMAGE_GET_LINE (dst_image, dst_x, dst_y, dst_type_t, dst_stride, dst_line, 1); \
+ if (have_mask) \
+ { \
+ if (mask_is_solid) \
+ solid_mask = _pixman_image_get_solid (imp, mask_image, dst_image->bits.format); \
+ else \
+ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type_t, \
+ mask_stride, mask_line, 1); \
+ } \
/* pass in 0 instead of src_x and src_y because src_x and src_y need to be \
* transformed from destination space to source space */ \
PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \
@@ -321,6 +336,11 @@ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp,
 { \
dst = dst_line; \
dst_line += dst_stride; \
+ if (have_mask && !mask_is_solid) \
+ { \
+ mask = mask_line; \
+ mask_line += mask_stride; \
+ } \
\
y = vy >> 16; \
vy += unit_y; \
@@ -332,60 +352,87 @@ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp,
 src = src_first_line + src_stride * y; \
if (left_pad > 0) \
{ \
- scanline_func (dst, src, left_pad, 0, 0, 0); \
+ scanline_func (mask, dst, src, left_pad, 0, 0, 0, FALSE); \
} \
if (width > 0) \
{ \
- scanline_func (dst + left_pad, src, width, vx, unit_x, 0); \
+ scanline_func (mask + (mask_is_solid ? 0 : left_pad), \
+ dst + left_pad, src, width, vx, unit_x, 0, FALSE); \
} \
if (right_pad > 0) \
{ \
- scanline_func (dst + left_pad + width, src + src_image->bits.width - 1, \
- right_pad, 0, 0, 0); \
+ scanline_func (mask + (mask_is_solid ? 0 : left_pad + width), \
+ dst + left_pad + width, src + src_image->bits.width - 1, \
+ right_pad, 0, 0, 0, FALSE); \
} \
} \
else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
{ \
- static src_type_t zero[1] = { 0 }; \
+ static const src_type_t zero[1] = { 0 }; \
if (y < 0 || y >= src_image->bits.height) \
{ \
- scanline_func (dst, zero, left_pad + width + right_pad, 0, 0, 0); \
+ scanline_func (mask, dst, zero, left_pad + width + right_pad, 0, 0, 0, TRUE); \
continue; \
} \
src = src_first_line + src_stride * y; \
if (left_pad > 0) \
{ \
- scanline_func (dst, zero, left_pad, 0, 0, 0); \
+ scanline_func (mask, dst, zero, left_pad, 0, 0, 0, TRUE); \
} \
if (width > 0) \
{ \
- scanline_func (dst + left_pad, src, width, vx, unit_x, 0); \
+ scanline_func (mask + (mask_is_solid ? 0 : left_pad), \
+ dst + left_pad, src, width, vx, unit_x, 0, FALSE); \
} \
if (right_pad > 0) \
{ \
- scanline_func (dst + left_pad + width, zero, right_pad, 0, 0, 0); \
+ scanline_func (mask + (mask_is_solid ? 0 : left_pad + width), \
+ dst + left_pad + width, zero, right_pad, 0, 0, 0, TRUE); \
} \
} \
else \
{ \
src = src_first_line + src_stride * y; \
- scanline_func (dst, src, width, vx, unit_x, max_vx); \
+ scanline_func (mask, dst, src, width, vx, unit_x, max_vx, FALSE); \
} \
} \
}
/* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */
-#define FAST_NEAREST_MAINLOOP(scale_func_name, scanline_func, src_type_t, dst_type_t, \
+#define FAST_NEAREST_MAINLOOP_COMMON(scale_func_name, scanline_func, src_type_t, mask_type_t, \
+ dst_type_t, repeat_mode, have_mask, mask_is_solid) \
+ FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, mask_type_t, \
+ dst_type_t, repeat_mode, have_mask, mask_is_solid)
+
+#define FAST_NEAREST_MAINLOOP_NOMASK(scale_func_name, scanline_func, src_type_t, dst_type_t, \
repeat_mode) \
- FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, dst_type_t, \
+ static force_inline void \
+ scanline_func##scale_func_name##_wrapper ( \
+ const uint8_t *mask, \
+ dst_type_t *dst, \
+ const src_type_t *src, \
+ int32_t w, \
+ pixman_fixed_t vx, \
+ pixman_fixed_t unit_x, \
+ pixman_fixed_t max_vx, \
+ pixman_bool_t fully_transparent_src) \
+ { \
+ scanline_func (dst, src, w, vx, unit_x, max_vx, fully_transparent_src); \
+ } \
+ FAST_NEAREST_MAINLOOP_INT (scale_func_name, scanline_func##scale_func_name##_wrapper, \
+ src_type_t, uint8_t, dst_type_t, repeat_mode, FALSE, FALSE)
+
+#define FAST_NEAREST_MAINLOOP(scale_func_name, scanline_func, src_type_t, dst_type_t, \
repeat_mode) \
+ FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name, scanline_func, src_type_t, \
+ dst_type_t, repeat_mode)
#define FAST_NEAREST(scale_func_name, SRC_FORMAT, DST_FORMAT, \
src_type_t, dst_type_t, OP, repeat_mode) \
FAST_NEAREST_SCANLINE(scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \
SRC_FORMAT, DST_FORMAT, src_type_t, dst_type_t, \
OP, repeat_mode) \
- FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name ## _ ## OP, \
+ FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name ## _ ## OP, \
scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \
src_type_t, dst_type_t, repeat_mode)
@@ -439,6 +486,90 @@ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp,
 fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \
}
+#define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NORMAL(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ (SCALED_NEAREST_FLAGS | \
+ FAST_PATH_NORMAL_REPEAT | \
+ FAST_PATH_X_UNIT_POSITIVE), \
+ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ (SCALED_NEAREST_FLAGS | \
+ FAST_PATH_PAD_REPEAT | \
+ FAST_PATH_X_UNIT_POSITIVE), \
+ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ (SCALED_NEAREST_FLAGS | \
+ FAST_PATH_NONE_REPEAT | \
+ FAST_PATH_X_UNIT_POSITIVE), \
+ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP, \
+ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ (SCALED_NEAREST_FLAGS | \
+ FAST_PATH_NORMAL_REPEAT | \
+ FAST_PATH_X_UNIT_POSITIVE), \
+ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ (SCALED_NEAREST_FLAGS | \
+ FAST_PATH_PAD_REPEAT | \
+ FAST_PATH_X_UNIT_POSITIVE), \
+ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ (SCALED_NEAREST_FLAGS | \
+ FAST_PATH_NONE_REPEAT | \
+ FAST_PATH_X_UNIT_POSITIVE), \
+ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \
+ }
+
+#define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER(op,s,d,func) \
+ { PIXMAN_OP_ ## op, \
+ PIXMAN_ ## s, \
+ SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP, \
+ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
+ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
+ fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \
+ }
+
/* Prefer the use of 'cover' variant, because it is faster */
#define SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \
SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func), \
@@ -446,4 +577,14 @@ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp,
 SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func), \
SIMPLE_NEAREST_FAST_PATH_NORMAL (op,s,d,func)
+#define SIMPLE_NEAREST_A8_MASK_FAST_PATH(op,s,d,func) \
+ SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER (op,s,d,func), \
+ SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE (op,s,d,func), \
+ SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD (op,s,d,func)
+
+#define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH(op,s,d,func) \
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER (op,s,d,func), \
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE (op,s,d,func), \
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD (op,s,d,func)
+
#endif
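To make the new table-entry macros concrete, a single use such as SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER (OVER, r5g6b5, r5g6b5, neon_0565_8_0565) — one of the three entries produced by PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH in the NEON fast-path table above — expands to roughly the following initializer (hand-expanded for illustration, not part of the patch):

/* Illustrative expansion (not in the patch) of one fast-path table entry */
{ PIXMAN_OP_OVER,
  PIXMAN_r5g6b5,
  SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP,
  PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA),
  PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS,
  fast_composite_scaled_nearest_neon_0565_8_0565_cover_OVER,
},

The fast_composite_scaled_nearest_neon_0565_8_0565_cover_OVER function named here is the one FAST_NEAREST_MAINLOOP_COMMON generates from the scanline wrapper defined by PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST.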
diff --git a/pixman/pixman/pixman-sse2.c b/pixman/pixman/pixman-sse2.c index 91adc0560..b62a54c3f 100644 --- a/pixman/pixman/pixman-sse2.c +++ b/pixman/pixman/pixman-sse2.c @@ -1,6302 +1,6424 @@ -/* - * Copyright © 2008 Rodrigo Kumpera - * Copyright © 2008 André Tupinambá - * - * Permission to use, copy, modify, distribute, and sell this software and its - * documentation for any purpose is hereby granted without fee, provided that - * the above copyright notice appear in all copies and that both that - * copyright notice and this permission notice appear in supporting - * documentation, and that the name of Red Hat not be used in advertising or - * publicity pertaining to distribution of the software without specific, - * written prior permission. Red Hat makes no representations about the - * suitability of this software for any purpose. It is provided "as is" - * without express or implied warranty. - * - * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS - * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY - * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN - * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING - * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS - * SOFTWARE. - * - * Author: Rodrigo Kumpera (kumpera@gmail.com) - * André Tupinambá (andrelrt@gmail.com) - * - * Based on work by Owen Taylor and Søren Sandmann - */ -#ifdef HAVE_CONFIG_H -#include <config.h> -#endif - -#include <mmintrin.h> -#include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */ -#include <emmintrin.h> /* for SSE2 intrinsics */ -#include "pixman-private.h" -#include "pixman-combine32.h" -#include "pixman-fast-path.h" - -#if defined(_MSC_VER) && defined(_M_AMD64) -/* Windows 64 doesn't allow MMX to be used, so - * the pixman-x64-mmx-emulation.h file contains - * implementations of those MMX intrinsics that - * are used in the SSE2 implementation. 
- */ -# include "pixman-x64-mmx-emulation.h" -#endif - -#ifdef USE_SSE2 - -/* -------------------------------------------------------------------- - * Locals - */ - -static __m64 mask_x0080; -static __m64 mask_x00ff; -static __m64 mask_x0101; -static __m64 mask_x_alpha; - -static __m64 mask_x565_rgb; -static __m64 mask_x565_unpack; - -static __m128i mask_0080; -static __m128i mask_00ff; -static __m128i mask_0101; -static __m128i mask_ffff; -static __m128i mask_ff000000; -static __m128i mask_alpha; - -static __m128i mask_565_r; -static __m128i mask_565_g1, mask_565_g2; -static __m128i mask_565_b; -static __m128i mask_red; -static __m128i mask_green; -static __m128i mask_blue; - -static __m128i mask_565_fix_rb; -static __m128i mask_565_fix_g; - -/* ---------------------------------------------------------------------- - * SSE2 Inlines - */ -static force_inline __m128i -unpack_32_1x128 (uint32_t data) -{ - return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128 ()); -} - -static force_inline void -unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi) -{ - *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ()); - *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ()); -} - -static force_inline __m128i -unpack_565_to_8888 (__m128i lo) -{ - __m128i r, g, b, rb, t; - - r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red); - g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green); - b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue); - - rb = _mm_or_si128 (r, b); - t = _mm_and_si128 (rb, mask_565_fix_rb); - t = _mm_srli_epi32 (t, 5); - rb = _mm_or_si128 (rb, t); - - t = _mm_and_si128 (g, mask_565_fix_g); - t = _mm_srli_epi32 (t, 6); - g = _mm_or_si128 (g, t); - - return _mm_or_si128 (rb, g); -} - -static force_inline void -unpack_565_128_4x128 (__m128i data, - __m128i* data0, - __m128i* data1, - __m128i* data2, - __m128i* data3) -{ - __m128i lo, hi; - - lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ()); - hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ()); - - lo = unpack_565_to_8888 (lo); - hi = unpack_565_to_8888 (hi); - - unpack_128_2x128 (lo, data0, data1); - unpack_128_2x128 (hi, data2, data3); -} - -static force_inline uint16_t -pack_565_32_16 (uint32_t pixel) -{ - return (uint16_t) (((pixel >> 8) & 0xf800) | - ((pixel >> 5) & 0x07e0) | - ((pixel >> 3) & 0x001f)); -} - -static force_inline __m128i -pack_2x128_128 (__m128i lo, __m128i hi) -{ - return _mm_packus_epi16 (lo, hi); -} - -static force_inline __m128i -pack_565_2x128_128 (__m128i lo, __m128i hi) -{ - __m128i data; - __m128i r, g1, g2, b; - - data = pack_2x128_128 (lo, hi); - - r = _mm_and_si128 (data, mask_565_r); - g1 = _mm_and_si128 (_mm_slli_epi32 (data, 3), mask_565_g1); - g2 = _mm_and_si128 (_mm_srli_epi32 (data, 5), mask_565_g2); - b = _mm_and_si128 (_mm_srli_epi32 (data, 3), mask_565_b); - - return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b); -} - -static force_inline __m128i -pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3) -{ - return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1), - pack_565_2x128_128 (*xmm2, *xmm3)); -} - -static force_inline int -is_opaque (__m128i x) -{ - __m128i ffs = _mm_cmpeq_epi8 (x, x); - - return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888; -} - -static force_inline int -is_zero (__m128i x) -{ - return _mm_movemask_epi8 ( - _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) == 0xffff; -} - -static force_inline int -is_transparent (__m128i x) -{ - return (_mm_movemask_epi8 ( - _mm_cmpeq_epi8 (x, 
_mm_setzero_si128 ())) & 0x8888) == 0x8888; -} - -static force_inline __m128i -expand_pixel_32_1x128 (uint32_t data) -{ - return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE (1, 0, 1, 0)); -} - -static force_inline __m128i -expand_alpha_1x128 (__m128i data) -{ - return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data, - _MM_SHUFFLE (3, 3, 3, 3)), - _MM_SHUFFLE (3, 3, 3, 3)); -} - -static force_inline void -expand_alpha_2x128 (__m128i data_lo, - __m128i data_hi, - __m128i* alpha_lo, - __m128i* alpha_hi) -{ - __m128i lo, hi; - - lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 3, 3, 3)); - hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 3, 3, 3)); - - *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 3, 3, 3)); - *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 3, 3, 3)); -} - -static force_inline void -expand_alpha_rev_2x128 (__m128i data_lo, - __m128i data_hi, - __m128i* alpha_lo, - __m128i* alpha_hi) -{ - __m128i lo, hi; - - lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (0, 0, 0, 0)); - hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (0, 0, 0, 0)); - *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (0, 0, 0, 0)); - *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (0, 0, 0, 0)); -} - -static force_inline void -pix_multiply_2x128 (__m128i* data_lo, - __m128i* data_hi, - __m128i* alpha_lo, - __m128i* alpha_hi, - __m128i* ret_lo, - __m128i* ret_hi) -{ - __m128i lo, hi; - - lo = _mm_mullo_epi16 (*data_lo, *alpha_lo); - hi = _mm_mullo_epi16 (*data_hi, *alpha_hi); - lo = _mm_adds_epu16 (lo, mask_0080); - hi = _mm_adds_epu16 (hi, mask_0080); - *ret_lo = _mm_mulhi_epu16 (lo, mask_0101); - *ret_hi = _mm_mulhi_epu16 (hi, mask_0101); -} - -static force_inline void -pix_add_multiply_2x128 (__m128i* src_lo, - __m128i* src_hi, - __m128i* alpha_dst_lo, - __m128i* alpha_dst_hi, - __m128i* dst_lo, - __m128i* dst_hi, - __m128i* alpha_src_lo, - __m128i* alpha_src_hi, - __m128i* ret_lo, - __m128i* ret_hi) -{ - __m128i t1_lo, t1_hi; - __m128i t2_lo, t2_hi; - - pix_multiply_2x128 (src_lo, src_hi, alpha_dst_lo, alpha_dst_hi, &t1_lo, &t1_hi); - pix_multiply_2x128 (dst_lo, dst_hi, alpha_src_lo, alpha_src_hi, &t2_lo, &t2_hi); - - *ret_lo = _mm_adds_epu8 (t1_lo, t2_lo); - *ret_hi = _mm_adds_epu8 (t1_hi, t2_hi); -} - -static force_inline void -negate_2x128 (__m128i data_lo, - __m128i data_hi, - __m128i* neg_lo, - __m128i* neg_hi) -{ - *neg_lo = _mm_xor_si128 (data_lo, mask_00ff); - *neg_hi = _mm_xor_si128 (data_hi, mask_00ff); -} - -static force_inline void -invert_colors_2x128 (__m128i data_lo, - __m128i data_hi, - __m128i* inv_lo, - __m128i* inv_hi) -{ - __m128i lo, hi; - - lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 0, 1, 2)); - hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 0, 1, 2)); - *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 0, 1, 2)); - *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 0, 1, 2)); -} - -static force_inline void -over_2x128 (__m128i* src_lo, - __m128i* src_hi, - __m128i* alpha_lo, - __m128i* alpha_hi, - __m128i* dst_lo, - __m128i* dst_hi) -{ - __m128i t1, t2; - - negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2); - - pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi); - - *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo); - *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi); -} - -static force_inline void -over_rev_non_pre_2x128 (__m128i src_lo, - __m128i src_hi, - __m128i* dst_lo, - __m128i* dst_hi) -{ - __m128i lo, hi; - __m128i alpha_lo, alpha_hi; - - expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi); - - lo = _mm_or_si128 (alpha_lo, mask_alpha); - hi 
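pix_multiply_2x128 above multiplies two sets of eight channels held as 16-bit lanes: _mm_mullo_epi16, a saturating add of 0x0080, then _mm_mulhi_epu16 with 0x0101. That sequence is the standard exact "divide by 255 with rounding" trick, because taking the high word of t * 0x0101 is the same as (t + (t >> 8)) >> 8. A small stand-alone check of the identity (my own test harness, not from the patch):

#include <assert.h>
#include <stdint.h>

/* scalar model of one 16-bit lane of pix_multiply_2x128 */
static unsigned
mul_un8 (unsigned a, unsigned b)
{
    unsigned t = a * b + 0x80;    /* _mm_mullo_epi16 + _mm_adds_epu16 (mask_0080) */
    return (t * 0x0101) >> 16;    /* _mm_mulhi_epu16 with mask_0101               */
}

int
main (void)
{
    unsigned a, b;

    for (a = 0; a <= 255; a++)
    {
        for (b = 0; b <= 255; b++)
        {
            /* exactly rounded a*b/255 (half-way cases cannot occur) */
            unsigned ref = (2 * a * b + 255) / 510;
            assert (mul_un8 (a, b) == ref);
        }
    }
    return 0;
}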
= _mm_or_si128 (alpha_hi, mask_alpha); - - invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi); - - pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi); - - over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi); -} - -static force_inline void -in_over_2x128 (__m128i* src_lo, - __m128i* src_hi, - __m128i* alpha_lo, - __m128i* alpha_hi, - __m128i* mask_lo, - __m128i* mask_hi, - __m128i* dst_lo, - __m128i* dst_hi) -{ - __m128i s_lo, s_hi; - __m128i a_lo, a_hi; - - pix_multiply_2x128 (src_lo, src_hi, mask_lo, mask_hi, &s_lo, &s_hi); - pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi); - - over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi); -} - -/* load 4 pixels from a 16-byte boundary aligned address */ -static force_inline __m128i -load_128_aligned (__m128i* src) -{ - return _mm_load_si128 (src); -} - -/* load 4 pixels from a unaligned address */ -static force_inline __m128i -load_128_unaligned (const __m128i* src) -{ - return _mm_loadu_si128 (src); -} - -/* save 4 pixels using Write Combining memory on a 16-byte - * boundary aligned address - */ -static force_inline void -save_128_write_combining (__m128i* dst, - __m128i data) -{ - _mm_stream_si128 (dst, data); -} - -/* save 4 pixels on a 16-byte boundary aligned address */ -static force_inline void -save_128_aligned (__m128i* dst, - __m128i data) -{ - _mm_store_si128 (dst, data); -} - -/* save 4 pixels on a unaligned address */ -static force_inline void -save_128_unaligned (__m128i* dst, - __m128i data) -{ - _mm_storeu_si128 (dst, data); -} - -/* ------------------------------------------------------------------ - * MMX inlines - */ - -static force_inline __m64 -load_32_1x64 (uint32_t data) -{ - return _mm_cvtsi32_si64 (data); -} - -static force_inline __m64 -unpack_32_1x64 (uint32_t data) -{ - return _mm_unpacklo_pi8 (load_32_1x64 (data), _mm_setzero_si64 ()); -} - -static force_inline __m64 -expand_alpha_1x64 (__m64 data) -{ - return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 3, 3, 3)); -} - -static force_inline __m64 -expand_alpha_rev_1x64 (__m64 data) -{ - return _mm_shuffle_pi16 (data, _MM_SHUFFLE (0, 0, 0, 0)); -} - -static force_inline __m64 -expand_pixel_8_1x64 (uint8_t data) -{ - return _mm_shuffle_pi16 ( - unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE (0, 0, 0, 0)); -} - -static force_inline __m64 -pix_multiply_1x64 (__m64 data, - __m64 alpha) -{ - return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha), - mask_x0080), - mask_x0101); -} - -static force_inline __m64 -pix_add_multiply_1x64 (__m64* src, - __m64* alpha_dst, - __m64* dst, - __m64* alpha_src) -{ - __m64 t1 = pix_multiply_1x64 (*src, *alpha_dst); - __m64 t2 = pix_multiply_1x64 (*dst, *alpha_src); - - return _mm_adds_pu8 (t1, t2); -} - -static force_inline __m64 -negate_1x64 (__m64 data) -{ - return _mm_xor_si64 (data, mask_x00ff); -} - -static force_inline __m64 -invert_colors_1x64 (__m64 data) -{ - return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 0, 1, 2)); -} - -static force_inline __m64 -over_1x64 (__m64 src, __m64 alpha, __m64 dst) -{ - return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha))); -} - -static force_inline __m64 -in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst) -{ - return over_1x64 (pix_multiply_1x64 (*src, *mask), - pix_multiply_1x64 (*alpha, *mask), - *dst); -} - -static force_inline __m64 -over_rev_non_pre_1x64 (__m64 src, __m64 dst) -{ - __m64 alpha = expand_alpha_1x64 (src); - - return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src), - _mm_or_si64 (alpha, 
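over_1x64 / over_2x128 above implement the premultiplied OVER operator: per channel, dst = src + dst * (255 - src.a) / 255, finished with a saturating byte add. A scalar sketch of the single-pixel case handled by core_combine_over_u_pixel_sse2 (helper names are mine, the arithmetic mirrors the code above):

#include <stdint.h>
#include <stdio.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;      /* rounded a*b/255, as in pix_multiply_1x64 */
}

/* premultiplied OVER for one a8r8g8b8 pixel: src + dst*(1 - src.a) */
static uint32_t
over_un8x4 (uint32_t src, uint32_t dst)
{
    uint32_t ia = 255 - (src >> 24);
    uint32_t out = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t s = (src >> shift) & 0xff;
        uint32_t d = (dst >> shift) & 0xff;
        uint32_t c = s + mul_un8 (d, ia);

        if (c > 255)                 /* _mm_adds_epu8 saturates the same way */
            c = 255;
        out |= c << shift;
    }
    return out;
}

int
main (void)
{
    printf ("%08x\n", over_un8x4 (0xff00ff00, 0xffff0000)); /* opaque green wins: ff00ff00    */
    printf ("%08x\n", over_un8x4 (0x00000000, 0x80402010)); /* transparent src: 80402010      */
    printf ("%08x\n", over_un8x4 (0x80008000, 0xffff0000)); /* half green over red: ff7f8000  */
    return 0;
}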
mask_x_alpha)), - alpha, - dst); -} - -static force_inline uint32_t -pack_1x64_32 (__m64 data) -{ - return _mm_cvtsi64_si32 (_mm_packs_pu16 (data, _mm_setzero_si64 ())); -} - -/* Expand 16 bits positioned at @pos (0-3) of a mmx register into - * - * 00RR00GG00BB - * - * --- Expanding 565 in the low word --- - * - * m = (m << (32 - 3)) | (m << (16 - 5)) | m; - * m = m & (01f0003f001f); - * m = m * (008404100840); - * m = m >> 8; - * - * Note the trick here - the top word is shifted by another nibble to - * avoid it bumping into the middle word - */ -static force_inline __m64 -expand565_16_1x64 (uint16_t pixel) -{ - __m64 p; - __m64 t1, t2; - - p = _mm_cvtsi32_si64 ((uint32_t) pixel); - - t1 = _mm_slli_si64 (p, 36 - 11); - t2 = _mm_slli_si64 (p, 16 - 5); - - p = _mm_or_si64 (t1, p); - p = _mm_or_si64 (t2, p); - p = _mm_and_si64 (p, mask_x565_rgb); - p = _mm_mullo_pi16 (p, mask_x565_unpack); - - return _mm_srli_pi16 (p, 8); -} - -/* ---------------------------------------------------------------------------- - * Compose Core transformations - */ -static force_inline uint32_t -core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst) -{ - uint8_t a; - __m64 ms; - - a = src >> 24; - - if (a == 0xff) - { - return src; - } - else if (src) - { - ms = unpack_32_1x64 (src); - return pack_1x64_32 ( - over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst))); - } - - return dst; -} - -static force_inline uint32_t -combine1 (const uint32_t *ps, const uint32_t *pm) -{ - uint32_t s = *ps; - - if (pm) - { - __m64 ms, mm; - - mm = unpack_32_1x64 (*pm); - mm = expand_alpha_1x64 (mm); - - ms = unpack_32_1x64 (s); - ms = pix_multiply_1x64 (ms, mm); - - s = pack_1x64_32 (ms); - } - - return s; -} - -static force_inline __m128i -combine4 (const __m128i *ps, const __m128i *pm) -{ - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_msk_lo, xmm_msk_hi; - __m128i s; - - if (pm) - { - xmm_msk_lo = load_128_unaligned (pm); - - if (is_transparent (xmm_msk_lo)) - return _mm_setzero_si128 (); - } - - s = load_128_unaligned (ps); - - if (pm) - { - unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi); - - expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_msk_lo, &xmm_msk_hi, - &xmm_src_lo, &xmm_src_hi); - - s = pack_2x128_128 (xmm_src_lo, xmm_src_hi); - } - - return s; -} - -static force_inline void -core_combine_over_u_sse2_mask (uint32_t * pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - uint32_t s, d; - - /* Align dst on a 16-byte boundary */ - while (w && ((unsigned long)pd & 15)) - { - d = *pd; - s = combine1 (ps, pm); - - if (s) - *pd = core_combine_over_u_pixel_sse2 (s, d); - pd++; - ps++; - pm++; - w--; - } - - while (w >= 4) - { - __m128i mask = load_128_unaligned ((__m128i *)pm); - - if (!is_zero (mask)) - { - __m128i src; - __m128i src_hi, src_lo; - __m128i mask_hi, mask_lo; - __m128i alpha_hi, alpha_lo; - - src = load_128_unaligned ((__m128i *)ps); - - if (is_opaque (_mm_and_si128 (src, mask))) - { - save_128_aligned ((__m128i *)pd, src); - } - else - { - __m128i dst = load_128_aligned ((__m128i *)pd); - __m128i dst_hi, dst_lo; - - unpack_128_2x128 (mask, &mask_lo, &mask_hi); - unpack_128_2x128 (src, &src_lo, &src_hi); - - expand_alpha_2x128 (mask_lo, mask_hi, &mask_lo, &mask_hi); - pix_multiply_2x128 (&src_lo, &src_hi, - &mask_lo, &mask_hi, - &src_lo, &src_hi); - - unpack_128_2x128 (dst, &dst_lo, &dst_hi); - - expand_alpha_2x128 (src_lo, src_hi, - &alpha_lo, 
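combine1 and combine4 above implement the "unified" mask convention: when a mask is supplied, only its alpha matters, so the source pixel is multiplied by the expanded mask alpha before the actual operator runs; combine4 additionally short-circuits a fully transparent 4-pixel mask block to zero. A scalar sketch of combine1 (function name mine, not from the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}

/* scalar combine1: multiply every channel of s by the mask's alpha */
static uint32_t
combine_1 (uint32_t s, const uint32_t *pm)
{
    uint32_t ma, out = 0;
    int shift;

    if (!pm)
        return s;                    /* no mask: the source passes through */

    ma = *pm >> 24;                  /* expand_alpha_1x64: only alpha is used */
    for (shift = 0; shift < 32; shift += 8)
        out |= mul_un8 ((s >> shift) & 0xff, ma) << shift;

    return out;
}

int
main (void)
{
    uint32_t opaque = 0xffffffff, clear = 0x00000000;

    assert (combine_1 (0x80402010, NULL)    == 0x80402010);
    assert (combine_1 (0x80402010, &opaque) == 0x80402010);
    assert (combine_1 (0x80402010, &clear)  == 0x00000000);
    return 0;
}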
&alpha_hi); - - over_2x128 (&src_lo, &src_hi, &alpha_lo, &alpha_hi, - &dst_lo, &dst_hi); - - save_128_aligned ( - (__m128i *)pd, - pack_2x128_128 (dst_lo, dst_hi)); - } - } - - pm += 4; - ps += 4; - pd += 4; - w -= 4; - } - while (w) - { - d = *pd; - s = combine1 (ps, pm); - - if (s) - *pd = core_combine_over_u_pixel_sse2 (s, d); - pd++; - ps++; - pm++; - - w--; - } -} - -static force_inline void -core_combine_over_u_sse2_no_mask (uint32_t * pd, - const uint32_t* ps, - int w) -{ - uint32_t s, d; - - /* Align dst on a 16-byte boundary */ - while (w && ((unsigned long)pd & 15)) - { - d = *pd; - s = *ps; - - if (s) - *pd = core_combine_over_u_pixel_sse2 (s, d); - pd++; - ps++; - w--; - } - - while (w >= 4) - { - __m128i src; - __m128i src_hi, src_lo, dst_hi, dst_lo; - __m128i alpha_hi, alpha_lo; - - src = load_128_unaligned ((__m128i *)ps); - - if (!is_zero (src)) - { - if (is_opaque (src)) - { - save_128_aligned ((__m128i *)pd, src); - } - else - { - __m128i dst = load_128_aligned ((__m128i *)pd); - - unpack_128_2x128 (src, &src_lo, &src_hi); - unpack_128_2x128 (dst, &dst_lo, &dst_hi); - - expand_alpha_2x128 (src_lo, src_hi, - &alpha_lo, &alpha_hi); - over_2x128 (&src_lo, &src_hi, &alpha_lo, &alpha_hi, - &dst_lo, &dst_hi); - - save_128_aligned ( - (__m128i *)pd, - pack_2x128_128 (dst_lo, dst_hi)); - } - } - - ps += 4; - pd += 4; - w -= 4; - } - while (w) - { - d = *pd; - s = *ps; - - if (s) - *pd = core_combine_over_u_pixel_sse2 (s, d); - pd++; - ps++; - - w--; - } -} - -static force_inline void -core_combine_over_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - if (pm) - core_combine_over_u_sse2_mask (pd, ps, pm, w); - else - core_combine_over_u_sse2_no_mask (pd, ps, w); -} - -static force_inline void -core_combine_over_reverse_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - uint32_t s, d; - - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_alpha_lo, xmm_alpha_hi; - - /* Align dst on a 16-byte boundary */ - while (w && - ((unsigned long)pd & 15)) - { - d = *pd; - s = combine1 (ps, pm); - - *pd++ = core_combine_over_u_pixel_sse2 (d, s); - w--; - ps++; - if (pm) - pm++; - } - - while (w >= 4) - { - /* I'm loading unaligned because I'm not sure - * about the address alignment. 
- */ - xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - over_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_src_lo, &xmm_src_hi); - - /* rebuid the 4 pixel data and save*/ - save_128_aligned ((__m128i*)pd, - pack_2x128_128 (xmm_src_lo, xmm_src_hi)); - - w -= 4; - ps += 4; - pd += 4; - - if (pm) - pm += 4; - } - - while (w) - { - d = *pd; - s = combine1 (ps, pm); - - *pd++ = core_combine_over_u_pixel_sse2 (d, s); - ps++; - w--; - if (pm) - pm++; - } -} - -static force_inline uint32_t -core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst) -{ - uint32_t maska = src >> 24; - - if (maska == 0) - { - return 0; - } - else if (maska != 0xff) - { - return pack_1x64_32 ( - pix_multiply_1x64 (unpack_32_1x64 (dst), - expand_alpha_1x64 (unpack_32_1x64 (src)))); - } - - return dst; -} - -static force_inline void -core_combine_in_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - uint32_t s, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - - while (w && ((unsigned long) pd & 15)) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_in_u_pixelsse2 (d, s); - w--; - ps++; - if (pm) - pm++; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ((__m128i*)pd, - pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - w -= 4; - if (pm) - pm += 4; - } - - while (w) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_in_u_pixelsse2 (d, s); - w--; - ps++; - if (pm) - pm++; - } -} - -static force_inline void -core_combine_reverse_in_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t *pm, - int w) -{ - uint32_t s, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - - while (w && ((unsigned long) pd & 15)) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_in_u_pixelsse2 (s, d); - ps++; - w--; - if (pm) - pm++; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_src_lo, &xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - w -= 4; - if (pm) - pm += 4; - } - - while (w) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_in_u_pixelsse2 (s, d); - w--; - ps++; - if (pm) - pm++; - } -} - -static force_inline void -core_combine_reverse_out_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - while (w && ((unsigned long) pd & 15)) - { - uint32_t s = combine1 (ps, pm); - uint32_t d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (d), 
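Every core_combine_*_sse2 function in this file follows the same shape: a scalar head loop runs until the destination pointer reaches a 16-byte boundary, a body then handles 4 pixels per iteration with aligned destination loads/stores, and a scalar tail mops up the remainder. A stripped-down sketch of that pattern, with a trivial per-pixel operation standing in for the real combiners (not from the patch):

#include <assert.h>
#include <stdint.h>

/* placeholder for the real per-pixel combiner */
static uint32_t
op (uint32_t s, uint32_t d)
{
    return s ^ d;
}

static void
combine_skeleton (uint32_t *pd, const uint32_t *ps, int w)
{
    /* head: one pixel at a time until the destination is 16-byte aligned */
    while (w && ((uintptr_t) pd & 15))
    {
        *pd = op (*ps, *pd);
        pd++, ps++, w--;
    }

    /* body: 4 pixels per iteration; the real code loads ps unaligned,
     * loads/stores pd aligned, and works in two 8x16-bit halves */
    while (w >= 4)
    {
        pd[0] = op (ps[0], pd[0]);
        pd[1] = op (ps[1], pd[1]);
        pd[2] = op (ps[2], pd[2]);
        pd[3] = op (ps[3], pd[3]);
        pd += 4, ps += 4, w -= 4;
    }

    /* tail: whatever is left */
    while (w)
    {
        *pd = op (*ps, *pd);
        pd++, ps++, w--;
    }
}

int
main (void)
{
    uint32_t dst[9] = { 0 }, src[9];
    int i;

    for (i = 0; i < 9; i++)
        src[i] = 0x01010101u * (uint32_t) i;

    combine_skeleton (dst, src, 9);
    for (i = 0; i < 9; i++)
        assert (dst[i] == src[i]);   /* 0 ^ s == s */
    return 0;
}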
negate_1x64 ( - expand_alpha_1x64 (unpack_32_1x64 (s))))); - - if (pm) - pm++; - ps++; - w--; - } - - while (w >= 4) - { - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - - xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - negate_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - - pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_src_lo, &xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - if (pm) - pm += 4; - - w -= 4; - } - - while (w) - { - uint32_t s = combine1 (ps, pm); - uint32_t d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (d), negate_1x64 ( - expand_alpha_1x64 (unpack_32_1x64 (s))))); - ps++; - if (pm) - pm++; - w--; - } -} - -static force_inline void -core_combine_out_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - while (w && ((unsigned long) pd & 15)) - { - uint32_t s = combine1 (ps, pm); - uint32_t d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (s), negate_1x64 ( - expand_alpha_1x64 (unpack_32_1x64 (d))))); - w--; - ps++; - if (pm) - pm++; - } - - while (w >= 4) - { - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - - xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm); - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - negate_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - w -= 4; - if (pm) - pm += 4; - } - - while (w) - { - uint32_t s = combine1 (ps, pm); - uint32_t d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (s), negate_1x64 ( - expand_alpha_1x64 (unpack_32_1x64 (d))))); - w--; - ps++; - if (pm) - pm++; - } -} - -static force_inline uint32_t -core_combine_atop_u_pixel_sse2 (uint32_t src, - uint32_t dst) -{ - __m64 s = unpack_32_1x64 (src); - __m64 d = unpack_32_1x64 (dst); - - __m64 sa = negate_1x64 (expand_alpha_1x64 (s)); - __m64 da = expand_alpha_1x64 (d); - - return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa)); -} - -static force_inline void -core_combine_atop_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - uint32_t s, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; - __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; - - while (w && ((unsigned long) pd & 15)) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_atop_u_pixel_sse2 (s, d); - w--; - ps++; - if (pm) - pm++; - } - - while (w >= 4) - { - xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - expand_alpha_2x128 (xmm_dst_lo, 
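The IN/OUT family above never needs an add: IN keeps the source scaled by the destination's alpha, OUT scales it by the complement of that alpha, and the "reverse" variants swap the roles of source and destination. A scalar model of the per-pixel math (helper names are mine):

#include <assert.h>
#include <stdint.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}

/* multiply every channel of a premultiplied argb pixel by an 8-bit factor */
static uint32_t
scale_un8x4 (uint32_t p, uint32_t f)
{
    uint32_t out = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
        out |= mul_un8 ((p >> shift) & 0xff, f) << shift;
    return out;
}

static uint32_t
in_op (uint32_t s, uint32_t d)  { return scale_un8x4 (s, d >> 24); }          /* src IN dst  */

static uint32_t
out_op (uint32_t s, uint32_t d) { return scale_un8x4 (s, 255 - (d >> 24)); }  /* src OUT dst */

int
main (void)
{
    /* against an opaque destination, IN keeps the source and OUT erases it */
    assert (in_op  (0x80402010, 0xff000000) == 0x80402010);
    assert (out_op (0x80402010, 0xff000000) == 0x00000000);
    /* against an empty destination it is the other way around */
    assert (in_op  (0x80402010, 0x00000000) == 0x00000000);
    assert (out_op (0x80402010, 0x00000000) == 0x80402010);
    return 0;
}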
xmm_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - - pix_add_multiply_2x128 ( - &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, - &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - w -= 4; - if (pm) - pm += 4; - } - - while (w) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_atop_u_pixel_sse2 (s, d); - w--; - ps++; - if (pm) - pm++; - } -} - -static force_inline uint32_t -core_combine_reverse_atop_u_pixel_sse2 (uint32_t src, - uint32_t dst) -{ - __m64 s = unpack_32_1x64 (src); - __m64 d = unpack_32_1x64 (dst); - - __m64 sa = expand_alpha_1x64 (s); - __m64 da = negate_1x64 (expand_alpha_1x64 (d)); - - return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa)); -} - -static force_inline void -core_combine_reverse_atop_u_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t* pm, - int w) -{ - uint32_t s, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; - __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; - - while (w && ((unsigned long) pd & 15)) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d); - ps++; - w--; - if (pm) - pm++; - } - - while (w >= 4) - { - xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - pix_add_multiply_2x128 ( - &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, - &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - w -= 4; - if (pm) - pm += 4; - } - - while (w) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d); - ps++; - w--; - if (pm) - pm++; - } -} - -static force_inline uint32_t -core_combine_xor_u_pixel_sse2 (uint32_t src, - uint32_t dst) -{ - __m64 s = unpack_32_1x64 (src); - __m64 d = unpack_32_1x64 (dst); - - __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d)); - __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s)); - - return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s)); -} - -static force_inline void -core_combine_xor_u_sse2 (uint32_t* dst, - const uint32_t* src, - const uint32_t *mask, - int width) -{ - int w = width; - uint32_t s, d; - uint32_t* pd = dst; - const uint32_t* ps = src; - const uint32_t* pm = mask; - - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; - __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; - - while (w && ((unsigned long) pd & 15)) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_xor_u_pixel_sse2 (s, d); - w--; - ps++; - if (pm) - pm++; - } - - while (w >= 4) - { - xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm); - xmm_dst = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src, 
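pix_add_multiply_1x64 / pix_add_multiply_2x128 compute src*fa + dst*fb with a saturating add, which is all that ATOP, ATOP_REVERSE and XOR need; the three per-pixel helpers above differ only in which of the two alphas get complemented. A scalar model (not from the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}

/* per-channel src*fa + dst*fb, saturating like _mm_adds_pu8 */
static uint32_t
add_mul_un8x4 (uint32_t s, uint32_t fa, uint32_t d, uint32_t fb)
{
    uint32_t out = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t c = mul_un8 ((s >> shift) & 0xff, fa) +
                     mul_un8 ((d >> shift) & 0xff, fb);
        if (c > 255)
            c = 255;
        out |= c << shift;
    }
    return out;
}

static uint32_t
atop_op (uint32_t s, uint32_t d)     /* src*dst.a + dst*(1 - src.a) */
{
    return add_mul_un8x4 (s, d >> 24, d, 255 - (s >> 24));
}

static uint32_t
atop_rev_op (uint32_t s, uint32_t d) /* src*(1 - dst.a) + dst*src.a */
{
    return add_mul_un8x4 (s, 255 - (d >> 24), d, s >> 24);
}

static uint32_t
xor_op (uint32_t s, uint32_t d)      /* src*(1 - dst.a) + dst*(1 - src.a) */
{
    return add_mul_un8x4 (s, 255 - (d >> 24), d, 255 - (s >> 24));
}

int
main (void)
{
    uint32_t s = 0x80402010, d = 0xc0103060;

    printf ("atop     %08x\n", atop_op (s, d));
    printf ("atop_rev %08x\n", atop_rev_op (s, d));
    printf ("xor      %08x\n", xor_op (s, d));
    return 0;
}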
&xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - pix_add_multiply_2x128 ( - &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, - &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - w -= 4; - if (pm) - pm += 4; - } - - while (w) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_xor_u_pixel_sse2 (s, d); - w--; - ps++; - if (pm) - pm++; - } -} - -static force_inline void -core_combine_add_u_sse2 (uint32_t* dst, - const uint32_t* src, - const uint32_t* mask, - int width) -{ - int w = width; - uint32_t s, d; - uint32_t* pd = dst; - const uint32_t* ps = src; - const uint32_t* pm = mask; - - while (w && (unsigned long)pd & 15) - { - s = combine1 (ps, pm); - d = *pd; - - ps++; - if (pm) - pm++; - *pd++ = _mm_cvtsi64_si32 ( - _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d))); - w--; - } - - while (w >= 4) - { - __m128i s; - - s = combine4 ((__m128i*)ps, (__m128i*)pm); - - save_128_aligned ( - (__m128i*)pd, _mm_adds_epu8 (s, load_128_aligned ((__m128i*)pd))); - - pd += 4; - ps += 4; - if (pm) - pm += 4; - w -= 4; - } - - while (w--) - { - s = combine1 (ps, pm); - d = *pd; - - ps++; - *pd++ = _mm_cvtsi64_si32 ( - _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d))); - if (pm) - pm++; - } -} - -static force_inline uint32_t -core_combine_saturate_u_pixel_sse2 (uint32_t src, - uint32_t dst) -{ - __m64 ms = unpack_32_1x64 (src); - __m64 md = unpack_32_1x64 (dst); - uint32_t sa = src >> 24; - uint32_t da = ~dst >> 24; - - if (sa > da) - { - ms = pix_multiply_1x64 ( - ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8 (da, sa) << 24))); - } - - return pack_1x64_32 (_mm_adds_pu16 (md, ms)); -} - -static force_inline void -core_combine_saturate_u_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, d; - - uint32_t pack_cmp; - __m128i xmm_src, xmm_dst; - - while (w && (unsigned long)pd & 15) - { - s = combine1 (ps, pm); - d = *pd; - - *pd++ = core_combine_saturate_u_pixel_sse2 (s, d); - w--; - ps++; - if (pm) - pm++; - } - - while (w >= 4) - { - xmm_dst = load_128_aligned ((__m128i*)pd); - xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm); - - pack_cmp = _mm_movemask_epi8 ( - _mm_cmpgt_epi32 ( - _mm_srli_epi32 (xmm_src, 24), - _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24))); - - /* if some alpha src is grater than respective ~alpha dst */ - if (pack_cmp) - { - s = combine1 (ps++, pm); - d = *pd; - *pd++ = core_combine_saturate_u_pixel_sse2 (s, d); - if (pm) - pm++; - - s = combine1 (ps++, pm); - d = *pd; - *pd++ = core_combine_saturate_u_pixel_sse2 (s, d); - if (pm) - pm++; - - s = combine1 (ps++, pm); - d = *pd; - *pd++ = core_combine_saturate_u_pixel_sse2 (s, d); - if (pm) - pm++; - - s = combine1 (ps++, pm); - d = *pd; - *pd++ = core_combine_saturate_u_pixel_sse2 (s, d); - if (pm) - pm++; - } - else - { - save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src)); - - pd += 4; - ps += 4; - if (pm) - pm += 4; - } - - w -= 4; - } - - while (w--) - { - s = combine1 (ps, pm); 
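SATURATE above adds the source into the destination, but first checks, per group of four pixels, whether any source alpha exceeds the destination's remaining headroom (~dst.a); only then does it drop to the per-pixel path that rescales the source by da/sa before adding. A scalar model of core_combine_saturate_u_pixel_sse2, with pixman's DIV_UN8 written out inline as I read it (rounded a*255/b); treat the exact rounding as an assumption:

#include <stdint.h>
#include <stdio.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}

/* rounded a*255/b, standing in for DIV_UN8 */
static uint32_t
div_un8 (uint32_t a, uint32_t b)
{
    return (a * 255 + b / 2) / b;
}

/* scalar model of core_combine_saturate_u_pixel_sse2 */
static uint32_t
saturate_op (uint32_t src, uint32_t dst)
{
    uint32_t sa = src >> 24;
    uint32_t da = ~dst >> 24;        /* room left in the destination alpha */
    uint32_t out = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t s = (src >> shift) & 0xff;
        uint32_t d = (dst >> shift) & 0xff;
        uint32_t c;

        if (sa > da)                 /* source would overflow: scale it down */
            s = mul_un8 (s, div_un8 (da, sa));

        c = d + s;
        if (c > 255)
            c = 255;
        out |= c << shift;
    }
    return out;
}

int
main (void)
{
    /* plenty of headroom: behaves like a plain add */
    printf ("%08x\n", saturate_op (0x20101010, 0x40202020));
    /* no headroom left: the source is scaled away */
    printf ("%08x\n", saturate_op (0x80404040, 0xffffffff));
    return 0;
}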
- d = *pd; - - *pd++ = core_combine_saturate_u_pixel_sse2 (s, d); - ps++; - if (pm) - pm++; - } -} - -static force_inline void -core_combine_src_ca_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m))); - w--; - } - - while (w >= 4) - { - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m))); - w--; - } -} - -static force_inline uint32_t -core_combine_over_ca_pixel_sse2 (uint32_t src, - uint32_t mask, - uint32_t dst) -{ - __m64 s = unpack_32_1x64 (src); - __m64 expAlpha = expand_alpha_1x64 (s); - __m64 unpk_mask = unpack_32_1x64 (mask); - __m64 unpk_dst = unpack_32_1x64 (dst); - - return pack_1x64_32 (in_over_1x64 (&s, &expAlpha, &unpk_mask, &unpk_dst)); -} - -static force_inline void -core_combine_over_ca_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - in_over_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d); - w--; - } -} - -static force_inline uint32_t -core_combine_over_reverse_ca_pixel_sse2 (uint32_t src, - uint32_t mask, - uint32_t dst) -{ - __m64 d = unpack_32_1x64 (dst); - - return pack_1x64_32 ( - over_1x64 (d, expand_alpha_1x64 (d), - pix_multiply_1x64 (unpack_32_1x64 (src), - unpack_32_1x64 (mask)))); -} - -static force_inline void -core_combine_over_reverse_ca_sse2 (uint32_t* pd, - const uint32_t* ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d); - w--; - } - - while (w >= 4) - { - xmm_dst_hi 
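In the *_ca (component alpha) variants the mask carries a separate 8-bit factor per channel, so in_over applies it twice: the source is multiplied channel-wise by the mask, and the destination is attenuated by (1 - mask*src.a) channel-wise. A scalar model of core_combine_over_ca_pixel_sse2 (not from the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}

/* scalar in_over: OVER with a per-channel mask (component alpha) */
static uint32_t
over_ca_op (uint32_t src, uint32_t mask, uint32_t dst)
{
    uint32_t sa = src >> 24;
    uint32_t out = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t s = (src  >> shift) & 0xff;
        uint32_t m = (mask >> shift) & 0xff;
        uint32_t d = (dst  >> shift) & 0xff;

        uint32_t sm = mul_un8 (s, m);            /* src scaled by this channel's mask */
        uint32_t am = mul_un8 (sa, m);           /* effective alpha for this channel  */
        uint32_t c  = sm + mul_un8 (d, 255 - am);

        if (c > 255)
            c = 255;
        out |= c << shift;
    }
    return out;
}

int
main (void)
{
    /* a full mask reduces to ordinary OVER; a zero mask leaves dst untouched */
    printf ("%08x\n", over_ca_op (0xff00ff00, 0xffffffff, 0xffff0000)); /* ff00ff00 */
    printf ("%08x\n", over_ca_op (0xff00ff00, 0x00000000, 0xffff0000)); /* ffff0000 */
    return 0;
}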
= load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - over_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_mask_lo, &xmm_mask_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d); - w--; - } -} - -static force_inline void -core_combine_in_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)), - expand_alpha_1x64 (unpack_32_1x64 (d)))); - - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - pix_multiply_1x64 ( - unpack_32_1x64 (s), unpack_32_1x64 (m)), - expand_alpha_1x64 (unpack_32_1x64 (d)))); - - w--; - } -} - -static force_inline void -core_combine_in_reverse_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (d), - pix_multiply_1x64 (unpack_32_1x64 (m), - expand_alpha_1x64 (unpack_32_1x64 (s))))); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - pix_multiply_2x128 
(&xmm_dst_lo, &xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (d), - pix_multiply_1x64 (unpack_32_1x64 (m), - expand_alpha_1x64 (unpack_32_1x64 (s))))); - w--; - } -} - -static force_inline void -core_combine_out_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - pix_multiply_1x64 ( - unpack_32_1x64 (s), unpack_32_1x64 (m)), - negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d))))); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - negate_2x128 (xmm_alpha_lo, xmm_alpha_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - pix_multiply_1x64 ( - unpack_32_1x64 (s), unpack_32_1x64 (m)), - negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d))))); - - w--; - } -} - -static force_inline void -core_combine_out_reverse_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (d), - negate_1x64 (pix_multiply_1x64 ( - unpack_32_1x64 (m), - expand_alpha_1x64 (unpack_32_1x64 (s)))))); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_mask_lo, &xmm_mask_hi); - - negate_2x128 (xmm_mask_lo, xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = 
*ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (d), - negate_1x64 (pix_multiply_1x64 ( - unpack_32_1x64 (m), - expand_alpha_1x64 (unpack_32_1x64 (s)))))); - w--; - } -} - -static force_inline uint32_t -core_combine_atop_ca_pixel_sse2 (uint32_t src, - uint32_t mask, - uint32_t dst) -{ - __m64 m = unpack_32_1x64 (mask); - __m64 s = unpack_32_1x64 (src); - __m64 d = unpack_32_1x64 (dst); - __m64 sa = expand_alpha_1x64 (s); - __m64 da = expand_alpha_1x64 (d); - - s = pix_multiply_1x64 (s, m); - m = negate_1x64 (pix_multiply_1x64 (m, sa)); - - return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da)); -} - -static force_inline void -core_combine_atop_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; - __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi); - pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi, - &xmm_mask_lo, &xmm_mask_hi); - - negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - pix_add_multiply_2x128 ( - &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d); - w--; - } -} - -static force_inline uint32_t -core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src, - uint32_t mask, - uint32_t dst) -{ - __m64 m = unpack_32_1x64 (mask); - __m64 s = unpack_32_1x64 (src); - __m64 d = unpack_32_1x64 (dst); - - __m64 da = negate_1x64 (expand_alpha_1x64 (d)); - __m64 sa = expand_alpha_1x64 (s); - - s = pix_multiply_1x64 (s, m); - m = pix_multiply_1x64 (m, sa); - - return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da)); -} - -static force_inline void -core_combine_reverse_atop_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; - __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - 
- unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi); - pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi, - &xmm_mask_lo, &xmm_mask_hi); - - negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - pix_add_multiply_2x128 ( - &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d); - w--; - } -} - -static force_inline uint32_t -core_combine_xor_ca_pixel_sse2 (uint32_t src, - uint32_t mask, - uint32_t dst) -{ - __m64 a = unpack_32_1x64 (mask); - __m64 s = unpack_32_1x64 (src); - __m64 d = unpack_32_1x64 (dst); - - __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 ( - a, expand_alpha_1x64 (s))); - __m64 dest = pix_multiply_1x64 (s, a); - __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d)); - - return pack_1x64_32 (pix_add_multiply_1x64 (&d, - &alpha_dst, - &dest, - &alpha_src)); -} - -static force_inline void -core_combine_xor_ca_sse2 (uint32_t * pd, - const uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; - __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d); - w--; - } - - while (w >= 4) - { - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi); - pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, - &xmm_alpha_src_lo, &xmm_alpha_src_hi, - &xmm_mask_lo, &xmm_mask_hi); - - negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, - &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); - negate_2x128 (xmm_mask_lo, xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - pix_add_multiply_2x128 ( - &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d); - w--; - } -} - -static force_inline void -core_combine_add_ca_sse2 (uint32_t * pd, - const 
uint32_t *ps, - const uint32_t *pm, - int w) -{ - uint32_t s, m, d; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask_lo, xmm_mask_hi; - - while (w && (unsigned long)pd & 15) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s), - unpack_32_1x64 (m)), - unpack_32_1x64 (d))); - w--; - } - - while (w >= 4) - { - xmm_src_hi = load_128_unaligned ((__m128i*)ps); - xmm_mask_hi = load_128_unaligned ((__m128i*)pm); - xmm_dst_hi = load_128_aligned ((__m128i*)pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_src_lo, &xmm_src_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 ( - _mm_adds_epu8 (xmm_src_lo, xmm_dst_lo), - _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi))); - - ps += 4; - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - s = *ps++; - m = *pm++; - d = *pd; - - *pd++ = pack_1x64_32 ( - _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s), - unpack_32_1x64 (m)), - unpack_32_1x64 (d))); - w--; - } -} - -/* --------------------------------------------------- - * fb_compose_setup_sSE2 - */ -static force_inline __m64 -create_mask_16_64 (uint16_t mask) -{ - return _mm_set1_pi16 (mask); -} - -static force_inline __m128i -create_mask_16_128 (uint16_t mask) -{ - return _mm_set1_epi16 (mask); -} - -static force_inline __m64 -create_mask_2x32_64 (uint32_t mask0, - uint32_t mask1) -{ - return _mm_set_pi32 (mask0, mask1); -} - -/* Work around a code generation bug in Sun Studio 12. */ -#if defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) -# define create_mask_2x32_128(mask0, mask1) \ - (_mm_set_epi32 ((mask0), (mask1), (mask0), (mask1))) -#else -static force_inline __m128i -create_mask_2x32_128 (uint32_t mask0, - uint32_t mask1) -{ - return _mm_set_epi32 (mask0, mask1, mask0, mask1); -} -#endif - -/* SSE2 code patch for fbcompose.c */ - -static void -sse2_combine_over_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_over_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_over_reverse_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_over_reverse_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_in_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_in_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_in_reverse_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_reverse_in_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_out_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_out_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_out_reverse_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_reverse_out_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - 
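All of the sse2_combine_* entry points in this part of the file are thin wrappers: they forward to the matching core_combine_* helper and then call _mm_empty(), because the scalar head and tail loops use MMX (__m64) registers, which alias the x87 register stack and must be released before any later floating-point code runs. A minimal sketch of that pattern (placeholder names, 32-bit-capable MMX build assumed):

#include <mmintrin.h>
#include <stdint.h>

/* stand-in for a core_combine_* helper that touches MMX state */
static void
core_combine_something (uint32_t *dst, const uint32_t *src, int width)
{
    while (width--)
    {
        __m64 s = _mm_cvtsi32_si64 ((int) *src++);
        __m64 d = _mm_cvtsi32_si64 ((int) *dst);

        *dst++ = (uint32_t) _mm_cvtsi64_si32 (_mm_adds_pu8 (s, d));
    }
}

/* wrapper pattern: do the MMX/SSE2 work, then clear the MMX state */
static void
sse2_combine_something (uint32_t *dst, const uint32_t *src, int width)
{
    core_combine_something (dst, src, width);
    _mm_empty ();                    /* emms: hand the x87 register file back */
}

int
main (void)
{
    uint32_t dst[2] = { 0x01020304, 0x01010101 };
    uint32_t src[2] = { 0x10203040, 0x10101010 };

    sse2_combine_something (dst, src, 2);
    return dst[0] == 0x11223344 ? 0 : 1;
}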
-static void -sse2_combine_atop_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_atop_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_atop_reverse_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_reverse_atop_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_xor_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_xor_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_add_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_add_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_saturate_u (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_saturate_u_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_src_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_src_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_over_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_over_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_over_reverse_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_over_reverse_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_in_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_in_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_in_reverse_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_in_reverse_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_out_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_out_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_out_reverse_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_out_reverse_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_atop_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_atop_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_atop_reverse_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_reverse_atop_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_xor_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * 
src, - const uint32_t * mask, - int width) -{ - core_combine_xor_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -static void -sse2_combine_add_ca (pixman_implementation_t *imp, - pixman_op_t op, - uint32_t * dst, - const uint32_t * src, - const uint32_t * mask, - int width) -{ - core_combine_add_ca_sse2 (dst, src, mask, width); - _mm_empty (); -} - -/* ------------------------------------------------------------------- - * composite_over_n_8888 - */ - -static void -sse2_composite_over_n_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src; - uint32_t *dst_line, *dst, d; - int32_t w; - int dst_stride; - __m128i xmm_src, xmm_alpha; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - - xmm_src = expand_pixel_32_1x128 (src); - xmm_alpha = expand_alpha_1x128 (xmm_src); - - while (height--) - { - dst = dst_line; - - dst_line += dst_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - d = *dst; - *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src), - _mm_movepi64_pi64 (xmm_alpha), - unpack_32_1x64 (d))); - w--; - } - - while (w >= 4) - { - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_dst_lo, &xmm_dst_hi); - - /* rebuid the 4 pixel data and save*/ - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - w -= 4; - dst += 4; - } - - while (w) - { - d = *dst; - *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src), - _mm_movepi64_pi64 (xmm_alpha), - unpack_32_1x64 (d))); - w--; - } - - } - _mm_empty (); -} - -/* --------------------------------------------------------------------- - * composite_over_n_0565 - */ -static void -sse2_composite_over_n_0565 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src; - uint16_t *dst_line, *dst, d; - int32_t w; - int dst_stride; - __m128i xmm_src, xmm_alpha; - __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - - xmm_src = expand_pixel_32_1x128 (src); - xmm_alpha = expand_alpha_1x128 (xmm_src); - - while (height--) - { - dst = dst_line; - - dst_line += dst_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - d = *dst; - - *dst++ = pack_565_32_16 ( - pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src), - _mm_movepi64_pi64 (xmm_alpha), - expand565_16_1x64 (d)))); - w--; - } - - while (w >= 8) - { - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_565_128_4x128 (xmm_dst, - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); - - over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_dst0, &xmm_dst1); - over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_dst2, 
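sse2_composite_over_n_8888 above exploits the fact that the source is a single solid color: _pixman_image_get_solid fetches it once, everything derived from it (xmm_src, xmm_alpha) is expanded once before the loops, the whole operation is skipped when the color is fully transparent, and the inner loops only ever touch the destination. A scalar sketch of that structure (names are mine; the per-pixel OVER is the same as in the earlier sketch):

#include <stdint.h>

static uint32_t
mul_un8 (uint32_t a, uint32_t b)
{
    uint32_t t = a * b + 0x80;
    return (t + (t >> 8)) >> 8;
}

static uint32_t
over_un8x4 (uint32_t src, uint32_t dst)
{
    uint32_t ia = 255 - (src >> 24);
    uint32_t out = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t c = ((src >> shift) & 0xff) + mul_un8 ((dst >> shift) & 0xff, ia);
        if (c > 255)
            c = 255;
        out |= c << shift;
    }
    return out;
}

/* solid color OVER a whole a8r8g8b8 surface: the source never changes,
 * so everything derived from it sits outside the loops */
static void
composite_over_solid (uint32_t src,
                      uint32_t *dst, int dst_stride,   /* stride in pixels */
                      int width, int height)
{
    if (src == 0)                    /* fully transparent solid: nothing to do */
        return;

    while (height--)
    {
        uint32_t *d = dst;
        int w = width;

        dst += dst_stride;
        while (w--)
        {
            *d = over_un8x4 (src, *d);
            d++;
        }
    }
}

int
main (void)
{
    uint32_t buf[4] = { 0xffff0000, 0xffff0000, 0xffff0000, 0xffff0000 };

    composite_over_solid (0xff00ff00, buf, 4, 4, 1);
    return buf[0] == 0xff00ff00 ? 0 : 1;
}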
&xmm_dst3); - - xmm_dst = pack_565_4x128_128 ( - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); - - save_128_aligned ((__m128i*)dst, xmm_dst); - - dst += 8; - w -= 8; - } - - while (w--) - { - d = *dst; - *dst++ = pack_565_32_16 ( - pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src), - _mm_movepi64_pi64 (xmm_alpha), - expand565_16_1x64 (d)))); - } - } - - _mm_empty (); -} - -/* ------------------------------ - * composite_add_n_8888_8888_ca - */ -static void -sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src, srca; - uint32_t *dst_line, d; - uint32_t *mask_line, m; - uint32_t pack_cmp; - int dst_stride, mask_stride; - - __m128i xmm_src, xmm_alpha; - __m128i xmm_dst; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - srca = src >> 24; - - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); - - xmm_src = _mm_unpacklo_epi8 ( - create_mask_2x32_128 (src, src), _mm_setzero_si128 ()); - xmm_alpha = expand_alpha_1x128 (xmm_src); - mmx_src = _mm_movepi64_pi64 (xmm_src); - mmx_alpha = _mm_movepi64_pi64 (xmm_alpha); - - while (height--) - { - int w = width; - const uint32_t *pm = (uint32_t *)mask_line; - uint32_t *pd = (uint32_t *)dst_line; - - dst_line += dst_stride; - mask_line += mask_stride; - - while (w && (unsigned long)pd & 15) - { - m = *pm++; - - if (m) - { - d = *pd; - - mmx_mask = unpack_32_1x64 (m); - mmx_dest = unpack_32_1x64 (d); - - *pd = pack_1x64_32 ( - _mm_adds_pu8 (pix_multiply_1x64 (mmx_mask, mmx_src), mmx_dest)); - } - - pd++; - w--; - } - - while (w >= 4) - { - xmm_mask = load_128_unaligned ((__m128i*)pm); - - pack_cmp = - _mm_movemask_epi8 ( - _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); - - /* if all bits in mask are zero, pack_cmp are equal to 0xffff */ - if (pack_cmp != 0xffff) - { - xmm_dst = load_128_aligned ((__m128i*)pd); - - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - pix_multiply_2x128 (&xmm_src, &xmm_src, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - xmm_mask_hi = pack_2x128_128 (xmm_mask_lo, xmm_mask_hi); - - save_128_aligned ( - (__m128i*)pd, _mm_adds_epu8 (xmm_mask_hi, xmm_dst)); - } - - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - m = *pm++; - - if (m) - { - d = *pd; - - mmx_mask = unpack_32_1x64 (m); - mmx_dest = unpack_32_1x64 (d); - - *pd = pack_1x64_32 ( - _mm_adds_pu8 (pix_multiply_1x64 (mmx_mask, mmx_src), mmx_dest)); - } - - pd++; - w--; - } - } - - _mm_empty (); -} - -/* --------------------------------------------------------------------------- - * composite_over_n_8888_8888_ca - */ - -static void -sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src; - uint32_t *dst_line, d; - uint32_t *mask_line, m; - uint32_t pack_cmp; - int dst_stride, mask_stride; - - __m128i xmm_src, 
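The component-alpha composite loops above load four mask pixels at a time and use _mm_cmpeq_epi32 plus _mm_movemask_epi8 to test whether all of them are zero (pack_cmp == 0xffff), in which case the destination block is left untouched. A small self-contained SSE2 example of that test (assumes an SSE2-capable compiler; not taken from the patch):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

/* returns non-zero when all four 32-bit mask pixels are zero */
static int
mask_block_is_zero (const uint32_t *pm)
{
    __m128i m = _mm_loadu_si128 ((const __m128i *) pm);
    __m128i z = _mm_cmpeq_epi32 (m, _mm_setzero_si128 ());

    /* every byte compares equal to zero <=> movemask is 0xffff */
    return _mm_movemask_epi8 (z) == 0xffff;
}

int
main (void)
{
    uint32_t all_zero[4] = { 0, 0, 0, 0 };
    uint32_t one_set[4]  = { 0, 0, 0x01000000, 0 };

    printf ("%d %d\n", mask_block_is_zero (all_zero), mask_block_is_zero (one_set));
    /* prints: 1 0 */
    return 0;
}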
xmm_alpha; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); - - xmm_src = _mm_unpacklo_epi8 ( - create_mask_2x32_128 (src, src), _mm_setzero_si128 ()); - xmm_alpha = expand_alpha_1x128 (xmm_src); - mmx_src = _mm_movepi64_pi64 (xmm_src); - mmx_alpha = _mm_movepi64_pi64 (xmm_alpha); - - while (height--) - { - int w = width; - const uint32_t *pm = (uint32_t *)mask_line; - uint32_t *pd = (uint32_t *)dst_line; - - dst_line += dst_stride; - mask_line += mask_stride; - - while (w && (unsigned long)pd & 15) - { - m = *pm++; - - if (m) - { - d = *pd; - mmx_mask = unpack_32_1x64 (m); - mmx_dest = unpack_32_1x64 (d); - - *pd = pack_1x64_32 (in_over_1x64 (&mmx_src, - &mmx_alpha, - &mmx_mask, - &mmx_dest)); - } - - pd++; - w--; - } - - while (w >= 4) - { - xmm_mask = load_128_unaligned ((__m128i*)pm); - - pack_cmp = - _mm_movemask_epi8 ( - _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); - - /* if all bits in mask are zero, pack_cmp are equal to 0xffff */ - if (pack_cmp != 0xffff) - { - xmm_dst = load_128_aligned ((__m128i*)pd); - - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - in_over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - - pd += 4; - pm += 4; - w -= 4; - } - - while (w) - { - m = *pm++; - - if (m) - { - d = *pd; - mmx_mask = unpack_32_1x64 (m); - mmx_dest = unpack_32_1x64 (d); - - *pd = pack_1x64_32 ( - in_over_1x64 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)); - } - - pd++; - w--; - } - } - - _mm_empty (); -} - -/*--------------------------------------------------------------------- - * composite_over_8888_n_8888 - */ - -static void -sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *dst_line, *dst; - uint32_t *src_line, *src; - uint32_t mask; - int32_t w; - int dst_stride, src_stride; - - __m128i xmm_mask; - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_alpha_lo, xmm_alpha_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - mask = _pixman_image_get_solid (imp, mask_image, PIXMAN_a8r8g8b8); - - xmm_mask = create_mask_16_128 (mask >> 24); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - uint32_t s = *src++; - - if (s) - { - uint32_t d = *dst; - - __m64 ms = unpack_32_1x64 (s); - __m64 alpha = expand_alpha_1x64 (ms); - __m64 dest = _mm_movepi64_pi64 (xmm_mask); - __m64 alpha_dst = unpack_32_1x64 (d); - - *dst = pack_1x64_32 ( - in_over_1x64 (&ms, &alpha, &dest, &alpha_dst)); - } - dst++; - w--; - } - - while (w >= 4) 
- { - xmm_src = load_128_unaligned ((__m128i*)src); - - if (!is_zero (xmm_src)) - { - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - in_over_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_mask, &xmm_mask, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - - dst += 4; - src += 4; - w -= 4; - } - - while (w) - { - uint32_t s = *src++; - - if (s) - { - uint32_t d = *dst; - - __m64 ms = unpack_32_1x64 (s); - __m64 alpha = expand_alpha_1x64 (ms); - __m64 mask = _mm_movepi64_pi64 (xmm_mask); - __m64 dest = unpack_32_1x64 (d); - - *dst = pack_1x64_32 ( - in_over_1x64 (&ms, &alpha, &mask, &dest)); - } - - dst++; - w--; - } - } - - _mm_empty (); -} - -/*--------------------------------------------------------------------- - * composite_over_8888_n_8888 - */ - -static void -sse2_composite_src_x888_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *dst_line, *dst; - uint32_t *src_line, *src; - int32_t w; - int dst_stride, src_stride; - - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - *dst++ = *src++ | 0xff000000; - w--; - } - - while (w >= 16) - { - __m128i xmm_src1, xmm_src2, xmm_src3, xmm_src4; - - xmm_src1 = load_128_unaligned ((__m128i*)src + 0); - xmm_src2 = load_128_unaligned ((__m128i*)src + 1); - xmm_src3 = load_128_unaligned ((__m128i*)src + 2); - xmm_src4 = load_128_unaligned ((__m128i*)src + 3); - - save_128_aligned ((__m128i*)dst + 0, _mm_or_si128 (xmm_src1, mask_ff000000)); - save_128_aligned ((__m128i*)dst + 1, _mm_or_si128 (xmm_src2, mask_ff000000)); - save_128_aligned ((__m128i*)dst + 2, _mm_or_si128 (xmm_src3, mask_ff000000)); - save_128_aligned ((__m128i*)dst + 3, _mm_or_si128 (xmm_src4, mask_ff000000)); - - dst += 16; - src += 16; - w -= 16; - } - - while (w) - { - *dst++ = *src++ | 0xff000000; - w--; - } - } - - _mm_empty (); -} - -/* --------------------------------------------------------------------- - * composite_over_x888_n_8888 - */ -static void -sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *dst_line, *dst; - uint32_t *src_line, *src; - uint32_t mask; - int dst_stride, src_stride; - int32_t w; - - __m128i xmm_mask, xmm_alpha; - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - mask = _pixman_image_get_solid (imp, mask_image, PIXMAN_a8r8g8b8); - - xmm_mask = 
create_mask_16_128 (mask >> 24); - xmm_alpha = mask_00ff; - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - uint32_t s = (*src++) | 0xff000000; - uint32_t d = *dst; - - __m64 src = unpack_32_1x64 (s); - __m64 alpha = _mm_movepi64_pi64 (xmm_alpha); - __m64 mask = _mm_movepi64_pi64 (xmm_mask); - __m64 dest = unpack_32_1x64 (d); - - *dst++ = pack_1x64_32 ( - in_over_1x64 (&src, &alpha, &mask, &dest)); - - w--; - } - - while (w >= 4) - { - xmm_src = _mm_or_si128 ( - load_128_unaligned ((__m128i*)src), mask_ff000000); - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - in_over_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_alpha, &xmm_alpha, - &xmm_mask, &xmm_mask, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - dst += 4; - src += 4; - w -= 4; - - } - - while (w) - { - uint32_t s = (*src++) | 0xff000000; - uint32_t d = *dst; - - __m64 src = unpack_32_1x64 (s); - __m64 alpha = _mm_movepi64_pi64 (xmm_alpha); - __m64 mask = _mm_movepi64_pi64 (xmm_mask); - __m64 dest = unpack_32_1x64 (d); - - *dst++ = pack_1x64_32 ( - in_over_1x64 (&src, &alpha, &mask, &dest)); - - w--; - } - } - - _mm_empty (); -} - -/* -------------------------------------------------------------------- - * composite_over_8888_8888 - */ -static void -sse2_composite_over_8888_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - int dst_stride, src_stride; - uint32_t *dst_line, *dst; - uint32_t *src_line, *src; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - dst = dst_line; - src = src_line; - - while (height--) - { - core_combine_over_u_sse2 (dst, src, NULL, width); - - dst += dst_stride; - src += src_stride; - } - _mm_empty (); -} - -/* ------------------------------------------------------------------ - * composite_over_8888_0565 - */ -static force_inline uint16_t -composite_over_8888_0565pixel (uint32_t src, uint16_t dst) -{ - __m64 ms; - - ms = unpack_32_1x64 (src); - return pack_565_32_16 ( - pack_1x64_32 ( - over_1x64 ( - ms, expand_alpha_1x64 (ms), expand565_16_1x64 (dst)))); -} - -static void -sse2_composite_over_8888_0565 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint16_t *dst_line, *dst, d; - uint32_t *src_line, *src, s; - int dst_stride, src_stride; - int32_t w; - - __m128i xmm_alpha_lo, xmm_alpha_hi; - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - -#if 0 - /* FIXME - * - * I copy the code from MMX one and keep the fixme. - * If it's a problem there, probably is a problem here. 
- */ - assert (src_image->drawable == mask_image->drawable); -#endif - - while (height--) - { - dst = dst_line; - src = src_line; - - dst_line += dst_stride; - src_line += src_stride; - w = width; - - /* Align dst on a 16-byte boundary */ - while (w && - ((unsigned long)dst & 15)) - { - s = *src++; - d = *dst; - - *dst++ = composite_over_8888_0565pixel (s, d); - w--; - } - - /* It's a 8 pixel loop */ - while (w >= 8) - { - /* I'm loading unaligned because I'm not sure - * about the address alignment. - */ - xmm_src = load_128_unaligned ((__m128i*) src); - xmm_dst = load_128_aligned ((__m128i*) dst); - - /* Unpacking */ - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_565_128_4x128 (xmm_dst, - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - /* I'm loading next 4 pixels from memory - * before to optimze the memory read. - */ - xmm_src = load_128_unaligned ((__m128i*) (src + 4)); - - over_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_dst0, &xmm_dst1); - - /* Unpacking */ - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi); - - over_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_dst2, &xmm_dst3); - - save_128_aligned ( - (__m128i*)dst, pack_565_4x128_128 ( - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); - - w -= 8; - dst += 8; - src += 8; - } - - while (w--) - { - s = *src++; - d = *dst; - - *dst++ = composite_over_8888_0565pixel (s, d); - } - } - - _mm_empty (); -} - -/* ----------------------------------------------------------------- - * composite_over_n_8_8888 - */ - -static void -sse2_composite_over_n_8_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src, srca; - uint32_t *dst_line, *dst; - uint8_t *mask_line, *mask; - int dst_stride, mask_stride; - int32_t w; - uint32_t m, d; - - __m128i xmm_src, xmm_alpha, xmm_def; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - srca = src >> 24; - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - - xmm_def = create_mask_2x32_128 (src, src); - xmm_src = expand_pixel_32_1x128 (src); - xmm_alpha = expand_alpha_1x128 (xmm_src); - mmx_src = _mm_movepi64_pi64 (xmm_src); - mmx_alpha = _mm_movepi64_pi64 (xmm_alpha); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - uint8_t m = *mask++; - - if (m) - { - d = *dst; - mmx_mask = expand_pixel_8_1x64 (m); - mmx_dest = unpack_32_1x64 (d); - - *dst = pack_1x64_32 (in_over_1x64 (&mmx_src, - &mmx_alpha, - &mmx_mask, - &mmx_dest)); - } - - w--; - dst++; - } - - while (w >= 4) - { - m = *((uint32_t*)mask); - - if (srca == 0xff && m == 0xffffffff) - { - save_128_aligned ((__m128i*)dst, xmm_def); - } - else if (m) - { - xmm_dst = load_128_aligned ((__m128i*) dst); - xmm_mask = unpack_32_1x128 (m); - 
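/* Note (annotation): at this point m carries four packed a8 mask values; the
 * two unpack steps spread them so each byte lands in its own pixel lane, and
 * expand_alpha_rev_2x128 below replicates each value across the colour
 * channels.  The in_over helpers then apply the usual premultiplied OVER
 * with a mask: dest = src*m + (1 - srca*m)*dest.
 */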
xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); - - /* Unpacking */ - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - in_over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - - w -= 4; - dst += 4; - mask += 4; - } - - while (w) - { - uint8_t m = *mask++; - - if (m) - { - d = *dst; - mmx_mask = expand_pixel_8_1x64 (m); - mmx_dest = unpack_32_1x64 (d); - - *dst = pack_1x64_32 (in_over_1x64 (&mmx_src, - &mmx_alpha, - &mmx_mask, - &mmx_dest)); - } - - w--; - dst++; - } - } - - _mm_empty (); -} - -/* ---------------------------------------------------------------- - * composite_over_n_8_8888 - */ - -pixman_bool_t -pixman_fill_sse2 (uint32_t *bits, - int stride, - int bpp, - int x, - int y, - int width, - int height, - uint32_t data) -{ - uint32_t byte_width; - uint8_t *byte_line; - - __m128i xmm_def; - - if (bpp == 8) - { - uint8_t b; - uint16_t w; - - stride = stride * (int) sizeof (uint32_t) / 1; - byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x); - byte_width = width; - stride *= 1; - - b = data & 0xff; - w = (b << 8) | b; - data = (w << 16) | w; - } - else if (bpp == 16) - { - stride = stride * (int) sizeof (uint32_t) / 2; - byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x); - byte_width = 2 * width; - stride *= 2; - - data = (data & 0xffff) * 0x00010001; - } - else if (bpp == 32) - { - stride = stride * (int) sizeof (uint32_t) / 4; - byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x); - byte_width = 4 * width; - stride *= 4; - } - else - { - return FALSE; - } - - xmm_def = create_mask_2x32_128 (data, data); - - while (height--) - { - int w; - uint8_t *d = byte_line; - byte_line += stride; - w = byte_width; - - while (w >= 1 && ((unsigned long)d & 1)) - { - *(uint8_t *)d = data; - w -= 1; - d += 1; - } - - while (w >= 2 && ((unsigned long)d & 3)) - { - *(uint16_t *)d = data; - w -= 2; - d += 2; - } - - while (w >= 4 && ((unsigned long)d & 15)) - { - *(uint32_t *)d = data; - - w -= 4; - d += 4; - } - - while (w >= 128) - { - save_128_aligned ((__m128i*)(d), xmm_def); - save_128_aligned ((__m128i*)(d + 16), xmm_def); - save_128_aligned ((__m128i*)(d + 32), xmm_def); - save_128_aligned ((__m128i*)(d + 48), xmm_def); - save_128_aligned ((__m128i*)(d + 64), xmm_def); - save_128_aligned ((__m128i*)(d + 80), xmm_def); - save_128_aligned ((__m128i*)(d + 96), xmm_def); - save_128_aligned ((__m128i*)(d + 112), xmm_def); - - d += 128; - w -= 128; - } - - if (w >= 64) - { - save_128_aligned ((__m128i*)(d), xmm_def); - save_128_aligned ((__m128i*)(d + 16), xmm_def); - save_128_aligned ((__m128i*)(d + 32), xmm_def); - save_128_aligned ((__m128i*)(d + 48), xmm_def); - - d += 64; - w -= 64; - } - - if (w >= 32) - { - save_128_aligned ((__m128i*)(d), xmm_def); - save_128_aligned ((__m128i*)(d + 16), xmm_def); - - d += 32; - w -= 32; - } - - if (w >= 16) - { - save_128_aligned ((__m128i*)(d), xmm_def); - - d += 16; - w -= 16; - } - - while (w >= 4) - { - *(uint32_t *)d = data; - - w -= 4; - d += 4; - } - - if (w >= 2) - { - *(uint16_t *)d = data; - w -= 2; - d += 2; - } - - if (w >= 1) - { - *(uint8_t *)d = data; - w -= 1; - d += 1; - } - } - - _mm_empty (); - return TRUE; -} - -static void -sse2_composite_src_n_8_8888 (pixman_implementation_t *imp, - pixman_op_t 
op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src, srca; - uint32_t *dst_line, *dst; - uint8_t *mask_line, *mask; - int dst_stride, mask_stride; - int32_t w; - uint32_t m; - - __m128i xmm_src, xmm_def; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - srca = src >> 24; - if (src == 0) - { - pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride, - PIXMAN_FORMAT_BPP (dst_image->bits.format), - dest_x, dest_y, width, height, 0); - return; - } - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - - xmm_def = create_mask_2x32_128 (src, src); - xmm_src = expand_pixel_32_1x128 (src); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - uint8_t m = *mask++; - - if (m) - { - *dst = pack_1x64_32 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m))); - } - else - { - *dst = 0; - } - - w--; - dst++; - } - - while (w >= 4) - { - m = *((uint32_t*)mask); - - if (srca == 0xff && m == 0xffffffff) - { - save_128_aligned ((__m128i*)dst, xmm_def); - } - else if (m) - { - xmm_mask = unpack_32_1x128 (m); - xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); - - /* Unpacking */ - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - pix_multiply_2x128 (&xmm_src, &xmm_src, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi)); - } - else - { - save_128_aligned ((__m128i*)dst, _mm_setzero_si128 ()); - } - - w -= 4; - dst += 4; - mask += 4; - } - - while (w) - { - uint8_t m = *mask++; - - if (m) - { - *dst = pack_1x64_32 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m))); - } - else - { - *dst = 0; - } - - w--; - dst++; - } - } - - _mm_empty (); -} - -/*----------------------------------------------------------------------- - * composite_over_n_8_0565 - */ - -static void -sse2_composite_over_n_8_0565 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src, srca; - uint16_t *dst_line, *dst, d; - uint8_t *mask_line, *mask; - int dst_stride, mask_stride; - int32_t w; - uint32_t m; - __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest; - - __m128i xmm_src, xmm_alpha; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - srca = src >> 24; - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - - xmm_src = expand_pixel_32_1x128 (src); - xmm_alpha = expand_alpha_1x128 (xmm_src); - mmx_src = _mm_movepi64_pi64 (xmm_src); - 
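/* Note (annotation): the solid source is kept both as 128-bit values
 * (xmm_src/xmm_alpha) for the 8-pixel SSE2 loop and as 64-bit values
 * (mmx_src/mmx_alpha) for the single-pixel loops that handle the unaligned
 * head and the leftover tail pixels.
 */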
mmx_alpha = _mm_movepi64_pi64 (xmm_alpha); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - m = *mask++; - - if (m) - { - d = *dst; - mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m)); - mmx_dest = expand565_16_1x64 (d); - - *dst = pack_565_32_16 ( - pack_1x64_32 ( - in_over_1x64 ( - &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); - } - - w--; - dst++; - } - - while (w >= 8) - { - xmm_dst = load_128_aligned ((__m128i*) dst); - unpack_565_128_4x128 (xmm_dst, - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); - - m = *((uint32_t*)mask); - mask += 4; - - if (m) - { - xmm_mask = unpack_32_1x128 (m); - xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); - - /* Unpacking */ - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - in_over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst0, &xmm_dst1); - } - - m = *((uint32_t*)mask); - mask += 4; - - if (m) - { - xmm_mask = unpack_32_1x128 (m); - xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); - - /* Unpacking */ - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - in_over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst2, &xmm_dst3); - } - - save_128_aligned ( - (__m128i*)dst, pack_565_4x128_128 ( - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); - - w -= 8; - dst += 8; - } - - while (w) - { - m = *mask++; - - if (m) - { - d = *dst; - mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m)); - mmx_dest = expand565_16_1x64 (d); - - *dst = pack_565_32_16 ( - pack_1x64_32 ( - in_over_1x64 ( - &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); - } - - w--; - dst++; - } - } - - _mm_empty (); -} - -/* ----------------------------------------------------------------------- - * composite_over_pixbuf_0565 - */ - -static void -sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint16_t *dst_line, *dst, d; - uint32_t *src_line, *src, s; - int dst_stride, src_stride; - int32_t w; - uint32_t opaque, zero; - - __m64 ms; - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - -#if 0 - /* FIXME - * - * I copy the code from MMX one and keep the fixme. - * If it's a problem there, probably is a problem here. 
- */ - assert (src_image->drawable == mask_image->drawable); -#endif - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - s = *src++; - d = *dst; - - ms = unpack_32_1x64 (s); - - *dst++ = pack_565_32_16 ( - pack_1x64_32 ( - over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d)))); - w--; - } - - while (w >= 8) - { - /* First round */ - xmm_src = load_128_unaligned ((__m128i*)src); - xmm_dst = load_128_aligned ((__m128i*)dst); - - opaque = is_opaque (xmm_src); - zero = is_zero (xmm_src); - - unpack_565_128_4x128 (xmm_dst, - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - - /* preload next round*/ - xmm_src = load_128_unaligned ((__m128i*)(src + 4)); - - if (opaque) - { - invert_colors_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_dst0, &xmm_dst1); - } - else if (!zero) - { - over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_dst0, &xmm_dst1); - } - - /* Second round */ - opaque = is_opaque (xmm_src); - zero = is_zero (xmm_src); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - - if (opaque) - { - invert_colors_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_dst2, &xmm_dst3); - } - else if (!zero) - { - over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_dst2, &xmm_dst3); - } - - save_128_aligned ( - (__m128i*)dst, pack_565_4x128_128 ( - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); - - w -= 8; - src += 8; - dst += 8; - } - - while (w) - { - s = *src++; - d = *dst; - - ms = unpack_32_1x64 (s); - - *dst++ = pack_565_32_16 ( - pack_1x64_32 ( - over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d)))); - w--; - } - } - - _mm_empty (); -} - -/* ------------------------------------------------------------------------- - * composite_over_pixbuf_8888 - */ - -static void -sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *dst_line, *dst, d; - uint32_t *src_line, *src, s; - int dst_stride, src_stride; - int32_t w; - uint32_t opaque, zero; - - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_dst_lo, xmm_dst_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - -#if 0 - /* FIXME - * - * I copy the code from MMX one and keep the fixme. - * If it's a problem there, probably is a problem here. 
- */ - assert (src_image->drawable == mask_image->drawable); -#endif - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - s = *src++; - d = *dst; - - *dst++ = pack_1x64_32 ( - over_rev_non_pre_1x64 ( - unpack_32_1x64 (s), unpack_32_1x64 (d))); - - w--; - } - - while (w >= 4) - { - xmm_src_hi = load_128_unaligned ((__m128i*)src); - - opaque = is_opaque (xmm_src_hi); - zero = is_zero (xmm_src_hi); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - - if (opaque) - { - invert_colors_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - else if (!zero) - { - xmm_dst_hi = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - - w -= 4; - dst += 4; - src += 4; - } - - while (w) - { - s = *src++; - d = *dst; - - *dst++ = pack_1x64_32 ( - over_rev_non_pre_1x64 ( - unpack_32_1x64 (s), unpack_32_1x64 (d))); - - w--; - } - } - - _mm_empty (); -} - -/* ------------------------------------------------------------------------------------------------- - * composite_over_n_8888_0565_ca - */ - -static void -sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src; - uint16_t *dst_line, *dst, d; - uint32_t *mask_line, *mask, m; - int dst_stride, mask_stride; - int w; - uint32_t pack_cmp; - - __m128i xmm_src, xmm_alpha; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; - - __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); - - xmm_src = expand_pixel_32_1x128 (src); - xmm_alpha = expand_alpha_1x128 (xmm_src); - mmx_src = _mm_movepi64_pi64 (xmm_src); - mmx_alpha = _mm_movepi64_pi64 (xmm_alpha); - - while (height--) - { - w = width; - mask = mask_line; - dst = dst_line; - mask_line += mask_stride; - dst_line += dst_stride; - - while (w && ((unsigned long)dst & 15)) - { - m = *(uint32_t *) mask; - - if (m) - { - d = *dst; - mmx_mask = unpack_32_1x64 (m); - mmx_dest = expand565_16_1x64 (d); - - *dst = pack_565_32_16 ( - pack_1x64_32 ( - in_over_1x64 ( - &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); - } - - w--; - dst++; - mask++; - } - - while (w >= 8) - { - /* First round */ - xmm_mask = load_128_unaligned ((__m128i*)mask); - xmm_dst = load_128_aligned ((__m128i*)dst); - - pack_cmp = _mm_movemask_epi8 ( - _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); - - unpack_565_128_4x128 (xmm_dst, - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - /* preload next round */ - xmm_mask = load_128_unaligned ((__m128i*)(mask + 4)); - - /* preload next round */ - if (pack_cmp != 0xffff) - { - in_over_2x128 (&xmm_src, &xmm_src, - 
&xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst0, &xmm_dst1); - } - - /* Second round */ - pack_cmp = _mm_movemask_epi8 ( - _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); - - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - - if (pack_cmp != 0xffff) - { - in_over_2x128 (&xmm_src, &xmm_src, - &xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_dst2, &xmm_dst3); - } - - save_128_aligned ( - (__m128i*)dst, pack_565_4x128_128 ( - &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); - - w -= 8; - dst += 8; - mask += 8; - } - - while (w) - { - m = *(uint32_t *) mask; - - if (m) - { - d = *dst; - mmx_mask = unpack_32_1x64 (m); - mmx_dest = expand565_16_1x64 (d); - - *dst = pack_565_32_16 ( - pack_1x64_32 ( - in_over_1x64 ( - &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); - } - - w--; - dst++; - mask++; - } - } - - _mm_empty (); -} - -/* ----------------------------------------------------------------------- - * composite_in_n_8_8 - */ - -static void -sse2_composite_in_n_8_8 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint8_t *dst_line, *dst; - uint8_t *mask_line, *mask; - int dst_stride, mask_stride; - uint32_t d, m; - uint32_t src; - uint8_t sa; - int32_t w; - - __m128i xmm_alpha; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - sa = src >> 24; - - xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src)); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - w = width; - - while (w && ((unsigned long)dst & 15)) - { - m = (uint32_t) *mask++; - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - pix_multiply_1x64 ( - pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), - unpack_32_1x64 (m)), - unpack_32_1x64 (d))); - w--; - } - - while (w >= 16) - { - xmm_mask = load_128_unaligned ((__m128i*)mask); - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, - &xmm_dst_lo, &xmm_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - mask += 16; - dst += 16; - w -= 16; - } - - while (w) - { - m = (uint32_t) *mask++; - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - pix_multiply_1x64 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)), - unpack_32_1x64 (d))); - w--; - } - } - - _mm_empty (); -} - -/* ----------------------------------------------------------------------- - * composite_in_n_8 - */ - -static void -sse2_composite_in_n_8 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - 
int32_t height) -{ - uint8_t *dst_line, *dst; - int dst_stride; - uint32_t d; - uint32_t src; - int32_t w; - - __m128i xmm_alpha; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src)); - - src = src >> 24; - - if (src == 0xff) - return; - - if (src == 0x00) - { - pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride, - 8, dest_x, dest_y, width, height, src); - - return; - } - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - w = width; - - while (w && ((unsigned long)dst & 15)) - { - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_alpha), - unpack_32_1x64 (d))); - w--; - } - - while (w >= 16) - { - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, - &xmm_dst_lo, &xmm_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - dst += 16; - w -= 16; - } - - while (w) - { - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_alpha), - unpack_32_1x64 (d))); - w--; - } - } - - _mm_empty (); -} - -/* --------------------------------------------------------------------------- - * composite_in_8_8 - */ - -static void -sse2_composite_in_8_8 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint8_t *dst_line, *dst; - uint8_t *src_line, *src; - int src_stride, dst_stride; - int32_t w; - uint32_t s, d; - - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - w = width; - - while (w && ((unsigned long)dst & 15)) - { - s = (uint32_t) *src++; - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - pix_multiply_1x64 ( - unpack_32_1x64 (s), unpack_32_1x64 (d))); - w--; - } - - while (w >= 16) - { - xmm_src = load_128_unaligned ((__m128i*)src); - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_dst_lo, &xmm_dst_hi, - &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - src += 16; - dst += 16; - w -= 16; - } - - while (w) - { - s = (uint32_t) *src++; - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d))); - w--; - } - } - - _mm_empty (); -} - -/* ------------------------------------------------------------------------- - * composite_add_n_8_8 - */ - -static void -sse2_composite_add_n_8_8 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - 
int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint8_t *dst_line, *dst; - uint8_t *mask_line, *mask; - int dst_stride, mask_stride; - int32_t w; - uint32_t src; - uint8_t sa; - uint32_t m, d; - - __m128i xmm_alpha; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - sa = src >> 24; - - xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src)); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - w = width; - - while (w && ((unsigned long)dst & 15)) - { - m = (uint32_t) *mask++; - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - _mm_adds_pu16 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)), - unpack_32_1x64 (d))); - w--; - } - - while (w >= 16) - { - xmm_mask = load_128_unaligned ((__m128i*)mask); - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, - &xmm_mask_lo, &xmm_mask_hi, - &xmm_mask_lo, &xmm_mask_hi); - - xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo); - xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - - mask += 16; - dst += 16; - w -= 16; - } - - while (w) - { - m = (uint32_t) *mask++; - d = (uint32_t) *dst; - - *dst++ = (uint8_t) pack_1x64_32 ( - _mm_adds_pu16 ( - pix_multiply_1x64 ( - _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)), - unpack_32_1x64 (d))); - - w--; - } - } - - _mm_empty (); -} - -/* ------------------------------------------------------------------------- - * composite_add_n_8_8 - */ - -static void -sse2_composite_add_n_8 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint8_t *dst_line, *dst; - int dst_stride; - int32_t w; - uint32_t src; - - __m128i xmm_src; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - src >>= 24; - - if (src == 0x00) - return; - - if (src == 0xff) - { - pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride, - 8, dest_x, dest_y, width, height, 0xff); - - return; - } - - src = (src << 24) | (src << 16) | (src << 8) | src; - xmm_src = _mm_set_epi32 (src, src, src, src); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - w = width; - - while (w && ((unsigned long)dst & 15)) - { - *dst = (uint8_t)_mm_cvtsi64_si32 ( - _mm_adds_pu8 ( - _mm_movepi64_pi64 (xmm_src), - _mm_cvtsi32_si64 (*dst))); - - w--; - dst++; - } - - while (w >= 16) - { - save_128_aligned ( - (__m128i*)dst, _mm_adds_epu8 (xmm_src, load_128_aligned ((__m128i*)dst))); - - dst += 16; - w -= 16; - } - - while (w) - { - *dst = (uint8_t)_mm_cvtsi64_si32 ( - _mm_adds_pu8 ( - _mm_movepi64_pi64 (xmm_src), - _mm_cvtsi32_si64 (*dst))); - - w--; - dst++; - } - } - - 
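/* Note (annotation): the single-pixel paths above use MMX (__m64)
 * intrinsics, so the shared x87/MMX register state has to be cleared with
 * _mm_empty () (EMMS) before returning, just as the other compositing
 * routines in this file do.
 */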
_mm_empty (); -} - -/* ---------------------------------------------------------------------- - * composite_add_8_8 - */ - -static void -sse2_composite_add_8_8 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint8_t *dst_line, *dst; - uint8_t *src_line, *src; - int dst_stride, src_stride; - int32_t w; - uint16_t t; - - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); - - while (height--) - { - dst = dst_line; - src = src_line; - - dst_line += dst_stride; - src_line += src_stride; - w = width; - - /* Small head */ - while (w && (unsigned long)dst & 3) - { - t = (*dst) + (*src++); - *dst++ = t | (0 - (t >> 8)); - w--; - } - - core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2); - - /* Small tail */ - dst += w & 0xfffc; - src += w & 0xfffc; - - w &= 3; - - while (w) - { - t = (*dst) + (*src++); - *dst++ = t | (0 - (t >> 8)); - w--; - } - } - - _mm_empty (); -} - -/* --------------------------------------------------------------------- - * composite_add_8888_8888 - */ -static void -sse2_composite_add_8888_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *dst_line, *dst; - uint32_t *src_line, *src; - int dst_stride, src_stride; - - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - - while (height--) - { - dst = dst_line; - dst_line += dst_stride; - src = src_line; - src_line += src_stride; - - core_combine_add_u_sse2 (dst, src, NULL, width); - } - - _mm_empty (); -} - -/* ------------------------------------------------------------------------------------------------- - * sse2_composite_copy_area - */ - -static pixman_bool_t -pixman_blt_sse2 (uint32_t *src_bits, - uint32_t *dst_bits, - int src_stride, - int dst_stride, - int src_bpp, - int dst_bpp, - int src_x, - int src_y, - int dst_x, - int dst_y, - int width, - int height) -{ - uint8_t * src_bytes; - uint8_t * dst_bytes; - int byte_width; - - if (src_bpp != dst_bpp) - return FALSE; - - if (src_bpp == 16) - { - src_stride = src_stride * (int) sizeof (uint32_t) / 2; - dst_stride = dst_stride * (int) sizeof (uint32_t) / 2; - src_bytes =(uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x)); - dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x)); - byte_width = 2 * width; - src_stride *= 2; - dst_stride *= 2; - } - else if (src_bpp == 32) - { - src_stride = src_stride * (int) sizeof (uint32_t) / 4; - dst_stride = dst_stride * (int) sizeof (uint32_t) / 4; - src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x)); - dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x)); - byte_width = 4 * width; - src_stride *= 4; - dst_stride *= 4; - } - else - { - return FALSE; - } - - while (height--) - { - int w; - uint8_t *s = src_bytes; - uint8_t *d = dst_bytes; - src_bytes += src_stride; - dst_bytes += dst_stride; 
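/* Note (annotation): per-row copy strategy below — advance to a 16-byte
 * aligned destination with 2- and 4-byte stores, stream 64 bytes at a time
 * through four XMM registers, then mop up the remainder with progressively
 * narrower stores.
 */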
- w = byte_width; - - while (w >= 2 && ((unsigned long)d & 3)) - { - *(uint16_t *)d = *(uint16_t *)s; - w -= 2; - s += 2; - d += 2; - } - - while (w >= 4 && ((unsigned long)d & 15)) - { - *(uint32_t *)d = *(uint32_t *)s; - - w -= 4; - s += 4; - d += 4; - } - - while (w >= 64) - { - __m128i xmm0, xmm1, xmm2, xmm3; - - xmm0 = load_128_unaligned ((__m128i*)(s)); - xmm1 = load_128_unaligned ((__m128i*)(s + 16)); - xmm2 = load_128_unaligned ((__m128i*)(s + 32)); - xmm3 = load_128_unaligned ((__m128i*)(s + 48)); - - save_128_aligned ((__m128i*)(d), xmm0); - save_128_aligned ((__m128i*)(d + 16), xmm1); - save_128_aligned ((__m128i*)(d + 32), xmm2); - save_128_aligned ((__m128i*)(d + 48), xmm3); - - s += 64; - d += 64; - w -= 64; - } - - while (w >= 16) - { - save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s) ); - - w -= 16; - d += 16; - s += 16; - } - - while (w >= 4) - { - *(uint32_t *)d = *(uint32_t *)s; - - w -= 4; - s += 4; - d += 4; - } - - if (w >= 2) - { - *(uint16_t *)d = *(uint16_t *)s; - w -= 2; - s += 2; - d += 2; - } - } - - _mm_empty (); - - return TRUE; -} - -static void -sse2_composite_copy_area (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - pixman_blt_sse2 (src_image->bits.bits, - dst_image->bits.bits, - src_image->bits.rowstride, - dst_image->bits.rowstride, - PIXMAN_FORMAT_BPP (src_image->bits.format), - PIXMAN_FORMAT_BPP (dst_image->bits.format), - src_x, src_y, dest_x, dest_y, width, height); -} - -static void -sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *src, *src_line, s; - uint32_t *dst, *dst_line, d; - uint8_t *mask, *mask_line; - uint32_t m; - int src_stride, mask_stride, dst_stride; - int32_t w; - __m64 ms; - - __m128i xmm_src, xmm_src_lo, xmm_src_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - while (height--) - { - src = src_line; - src_line += src_stride; - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - - w = width; - - while (w && (unsigned long)dst & 15) - { - s = 0xff000000 | *src++; - m = (uint32_t) *mask++; - d = *dst; - ms = unpack_32_1x64 (s); - - if (m != 0xff) - { - __m64 ma = expand_alpha_rev_1x64 (unpack_32_1x64 (m)); - __m64 md = unpack_32_1x64 (d); - - ms = in_over_1x64 (&ms, &mask_x00ff, &ma, &md); - } - - *dst++ = pack_1x64_32 (ms); - w--; - } - - while (w >= 4) - { - m = *(uint32_t*) mask; - xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000); - - if (m == 0xffffffff) - { - save_128_aligned ((__m128i*)dst, xmm_src); - } - else - { - xmm_dst = load_128_aligned ((__m128i*)dst); - - xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128()); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask, 
&xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &mask_00ff, &mask_00ff, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - - src += 4; - dst += 4; - mask += 4; - w -= 4; - } - - while (w) - { - m = (uint32_t) *mask++; - - if (m) - { - s = 0xff000000 | *src; - - if (m == 0xff) - { - *dst = s; - } - else - { - __m64 ma, md, ms; - - d = *dst; - - ma = expand_alpha_rev_1x64 (unpack_32_1x64 (m)); - md = unpack_32_1x64 (d); - ms = unpack_32_1x64 (s); - - *dst = pack_1x64_32 (in_over_1x64 (&ms, &mask_x00ff, &ma, &md)); - } - - } - - src++; - dst++; - w--; - } - } - - _mm_empty (); -} - -static void -sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *src, *src_line, s; - uint32_t *dst, *dst_line, d; - uint8_t *mask, *mask_line; - uint32_t m; - int src_stride, mask_stride, dst_stride; - int32_t w; - - __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - while (height--) - { - src = src_line; - src_line += src_stride; - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - - w = width; - - while (w && (unsigned long)dst & 15) - { - uint32_t sa; - - s = *src++; - m = (uint32_t) *mask++; - d = *dst; - - sa = s >> 24; - - if (m) - { - if (sa == 0xff && m == 0xff) - { - *dst = s; - } - else - { - __m64 ms, md, ma, msa; - - ma = expand_alpha_rev_1x64 (load_32_1x64 (m)); - ms = unpack_32_1x64 (s); - md = unpack_32_1x64 (d); - - msa = expand_alpha_rev_1x64 (load_32_1x64 (sa)); - - *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md)); - } - } - - dst++; - w--; - } - - while (w >= 4) - { - m = *(uint32_t *) mask; - - if (m) - { - xmm_src = load_128_unaligned ((__m128i*)src); - - if (m == 0xffffffff && is_opaque (xmm_src)) - { - save_128_aligned ((__m128i *)dst, xmm_src); - } - else - { - xmm_dst = load_128_aligned ((__m128i *)dst); - - xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128()); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi); - expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi, - &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - } - - src += 4; - dst += 4; - mask += 4; - w -= 4; - } - - while (w) - { - uint32_t sa; - - s = *src++; - m = (uint32_t) *mask++; - d = *dst; - - sa = s >> 24; - - if (m) - { - if (sa == 0xff && m == 0xff) - { - *dst = s; - } - else 
- { - __m64 ms, md, ma, msa; - - ma = expand_alpha_rev_1x64 (load_32_1x64 (m)); - ms = unpack_32_1x64 (s); - md = unpack_32_1x64 (d); - - msa = expand_alpha_rev_1x64 (load_32_1x64 (sa)); - - *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md)); - } - } - - dst++; - w--; - } - } - - _mm_empty (); -} - -static void -sse2_composite_over_reverse_n_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t src; - uint32_t *dst_line, *dst; - __m128i xmm_src; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_dsta_hi, xmm_dsta_lo; - int dst_stride; - int32_t w; - - src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format); - - if (src == 0) - return; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - - xmm_src = expand_pixel_32_1x128 (src); - - while (height--) - { - dst = dst_line; - - dst_line += dst_stride; - w = width; - - while (w && (unsigned long)dst & 15) - { - __m64 vd; - - vd = unpack_32_1x64 (*dst); - - *dst = pack_1x64_32 (over_1x64 (vd, expand_alpha_1x64 (vd), - _mm_movepi64_pi64 (xmm_src))); - w--; - dst++; - } - - while (w >= 4) - { - __m128i tmp_lo, tmp_hi; - - xmm_dst = load_128_aligned ((__m128i*)dst); - - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dsta_lo, &xmm_dsta_hi); - - tmp_lo = xmm_src; - tmp_hi = xmm_src; - - over_2x128 (&xmm_dst_lo, &xmm_dst_hi, - &xmm_dsta_lo, &xmm_dsta_hi, - &tmp_lo, &tmp_hi); - - save_128_aligned ( - (__m128i*)dst, pack_2x128_128 (tmp_lo, tmp_hi)); - - w -= 4; - dst += 4; - } - - while (w) - { - __m64 vd; - - vd = unpack_32_1x64 (*dst); - - *dst = pack_1x64_32 (over_1x64 (vd, expand_alpha_1x64 (vd), - _mm_movepi64_pi64 (xmm_src))); - w--; - dst++; - } - - } - - _mm_empty (); -} - -static void -sse2_composite_over_8888_8888_8888 (pixman_implementation_t *imp, - pixman_op_t op, - pixman_image_t * src_image, - pixman_image_t * mask_image, - pixman_image_t * dst_image, - int32_t src_x, - int32_t src_y, - int32_t mask_x, - int32_t mask_y, - int32_t dest_x, - int32_t dest_y, - int32_t width, - int32_t height) -{ - uint32_t *src, *src_line, s; - uint32_t *dst, *dst_line, d; - uint32_t *mask, *mask_line; - uint32_t m; - int src_stride, mask_stride, dst_stride; - int32_t w; - - __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi; - __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; - __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; - - PIXMAN_IMAGE_GET_LINE ( - dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); - PIXMAN_IMAGE_GET_LINE ( - mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); - PIXMAN_IMAGE_GET_LINE ( - src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); - - while (height--) - { - src = src_line; - src_line += src_stride; - dst = dst_line; - dst_line += dst_stride; - mask = mask_line; - mask_line += mask_stride; - - w = width; - - while (w && (unsigned long)dst & 15) - { - uint32_t sa; - - s = *src++; - m = (*mask++) >> 24; - d = *dst; - - sa = s >> 24; - - if (m) - { - if (sa == 0xff && m == 0xff) - { - *dst = s; - } - else - { - __m64 ms, md, ma, msa; - - ma = expand_alpha_rev_1x64 (load_32_1x64 (m)); - ms = unpack_32_1x64 (s); - md = unpack_32_1x64 (d); - - msa = expand_alpha_rev_1x64 (load_32_1x64 (sa)); - - *dst = pack_1x64_32 (in_over_1x64 
(&ms, &msa, &ma, &md)); - } - } - - dst++; - w--; - } - - while (w >= 4) - { - xmm_mask = load_128_unaligned ((__m128i*)mask); - - if (!is_transparent (xmm_mask)) - { - xmm_src = load_128_unaligned ((__m128i*)src); - - if (is_opaque (xmm_mask) && is_opaque (xmm_src)) - { - save_128_aligned ((__m128i *)dst, xmm_src); - } - else - { - xmm_dst = load_128_aligned ((__m128i *)dst); - - unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); - unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi); - expand_alpha_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); - - in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi, - &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); - - save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - } - - src += 4; - dst += 4; - mask += 4; - w -= 4; - } - - while (w) - { - uint32_t sa; - - s = *src++; - m = (*mask++) >> 24; - d = *dst; - - sa = s >> 24; - - if (m) - { - if (sa == 0xff && m == 0xff) - { - *dst = s; - } - else - { - __m64 ms, md, ma, msa; - - ma = expand_alpha_rev_1x64 (load_32_1x64 (m)); - ms = unpack_32_1x64 (s); - md = unpack_32_1x64 (d); - - msa = expand_alpha_rev_1x64 (load_32_1x64 (sa)); - - *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md)); - } - } - - dst++; - w--; - } - } - - _mm_empty (); -} - -/* A variant of 'core_combine_over_u_sse2' with minor tweaks */ -static force_inline void -scaled_nearest_scanline_sse2_8888_8888_OVER (uint32_t* pd, - const uint32_t* ps, - int32_t w, - pixman_fixed_t vx, - pixman_fixed_t unit_x, - pixman_fixed_t max_vx) -{ - uint32_t s, d; - const uint32_t* pm = NULL; - - __m128i xmm_dst_lo, xmm_dst_hi; - __m128i xmm_src_lo, xmm_src_hi; - __m128i xmm_alpha_lo, xmm_alpha_hi; - - /* Align dst on a 16-byte boundary */ - while (w && ((unsigned long)pd & 15)) - { - d = *pd; - s = combine1 (ps + (vx >> 16), pm); - vx += unit_x; - - *pd++ = core_combine_over_u_pixel_sse2 (s, d); - if (pm) - pm++; - w--; - } - - while (w >= 4) - { - __m128i tmp; - uint32_t tmp1, tmp2, tmp3, tmp4; - - tmp1 = ps[vx >> 16]; - vx += unit_x; - tmp2 = ps[vx >> 16]; - vx += unit_x; - tmp3 = ps[vx >> 16]; - vx += unit_x; - tmp4 = ps[vx >> 16]; - vx += unit_x; - - tmp = _mm_set_epi32 (tmp4, tmp3, tmp2, tmp1); - - xmm_src_hi = combine4 ((__m128i*)&tmp, (__m128i*)pm); - - if (is_opaque (xmm_src_hi)) - { - save_128_aligned ((__m128i*)pd, xmm_src_hi); - } - else if (!is_zero (xmm_src_hi)) - { - xmm_dst_hi = load_128_aligned ((__m128i*) pd); - - unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); - unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); - - expand_alpha_2x128 ( - xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); - - over_2x128 (&xmm_src_lo, &xmm_src_hi, - &xmm_alpha_lo, &xmm_alpha_hi, - &xmm_dst_lo, &xmm_dst_hi); - - /* rebuid the 4 pixel data and save*/ - save_128_aligned ((__m128i*)pd, - pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); - } - - w -= 4; - pd += 4; - if (pm) - pm += 4; - } - - while (w) - { - d = *pd; - s = combine1 (ps + (vx >> 16), pm); - vx += unit_x; - - *pd++ = core_combine_over_u_pixel_sse2 (s, d); - if (pm) - pm++; - - w--; - } - _mm_empty (); -} - -FAST_NEAREST_MAINLOOP (sse2_8888_8888_cover_OVER, - scaled_nearest_scanline_sse2_8888_8888_OVER, - uint32_t, uint32_t, COVER) -FAST_NEAREST_MAINLOOP (sse2_8888_8888_none_OVER, - scaled_nearest_scanline_sse2_8888_8888_OVER, - uint32_t, uint32_t, NONE) -FAST_NEAREST_MAINLOOP 
(sse2_8888_8888_pad_OVER, - scaled_nearest_scanline_sse2_8888_8888_OVER, - uint32_t, uint32_t, PAD) - -static const pixman_fast_path_t sse2_fast_paths[] = -{ - /* PIXMAN_OP_OVER */ - PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, sse2_composite_over_n_8_0565), - PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, sse2_composite_over_n_8_0565), - PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, sse2_composite_over_n_8888), - PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, sse2_composite_over_n_8888), - PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, sse2_composite_over_n_0565), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, sse2_composite_over_8888_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, sse2_composite_over_8888_8888), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, sse2_composite_over_8888_8888), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, sse2_composite_over_8888_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, sse2_composite_over_8888_0565), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, sse2_composite_over_8888_0565), - PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, sse2_composite_over_n_8_8888), - PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, sse2_composite_over_n_8_8888), - PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, sse2_composite_over_n_8_8888), - PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, sse2_composite_over_n_8_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, sse2_composite_over_8888_8888_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, sse2_composite_over_8888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, sse2_composite_over_8888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, sse2_composite_over_8888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, sse2_composite_over_8888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, sse2_composite_over_x888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, sse2_composite_over_x888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, sse2_composite_over_x888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, sse2_composite_over_x888_8_8888), - PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, sse2_composite_over_x888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, sse2_composite_over_x888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, a8b8g8r8, sse2_composite_over_x888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, sse2_composite_over_x888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, sse2_composite_over_8888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, sse2_composite_over_8888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, sse2_composite_over_8888_n_8888), - PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, sse2_composite_over_8888_n_8888), - PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, sse2_composite_over_n_8888_8888_ca), - PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, sse2_composite_over_n_8888_8888_ca), - PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, sse2_composite_over_n_8888_8888_ca), - PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, sse2_composite_over_n_8888_8888_ca), - PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, sse2_composite_over_n_8888_0565_ca), - PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, sse2_composite_over_n_8888_0565_ca), - 
PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, sse2_composite_over_pixbuf_8888), - PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, sse2_composite_over_pixbuf_8888), - PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, sse2_composite_over_pixbuf_8888), - PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, sse2_composite_over_pixbuf_8888), - PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, r5g6b5, sse2_composite_over_pixbuf_0565), - PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, sse2_composite_over_pixbuf_0565), - PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), - - /* PIXMAN_OP_OVER_REVERSE */ - PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, sse2_composite_over_reverse_n_8888), - PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, sse2_composite_over_reverse_n_8888), - - /* PIXMAN_OP_ADD */ - PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, sse2_composite_add_n_8888_8888_ca), - PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, sse2_composite_add_8_8), - PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, sse2_composite_add_8888_8888), - PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, sse2_composite_add_8888_8888), - PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, sse2_composite_add_n_8_8), - PIXMAN_STD_FAST_PATH (ADD, solid, null, a8, sse2_composite_add_n_8), - - /* PIXMAN_OP_SRC */ - PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, sse2_composite_src_n_8_8888), - PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, sse2_composite_src_n_8_8888), - PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, sse2_composite_src_n_8_8888), - PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, sse2_composite_src_n_8_8888), - PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, sse2_composite_src_x888_8888), - PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, sse2_composite_src_x888_8888), - PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, sse2_composite_copy_area), - PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, sse2_composite_copy_area), - - /* PIXMAN_OP_IN */ - PIXMAN_STD_FAST_PATH (IN, a8, null, a8, sse2_composite_in_8_8), - PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, sse2_composite_in_n_8_8), - PIXMAN_STD_FAST_PATH (IN, solid, null, a8, sse2_composite_in_n_8), - - SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_PAD (OVER, 
a8b8g8r8, x8b8g8r8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888), - SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888), - - { PIXMAN_OP_NONE }, -}; - -static pixman_bool_t -sse2_blt (pixman_implementation_t *imp, - uint32_t * src_bits, - uint32_t * dst_bits, - int src_stride, - int dst_stride, - int src_bpp, - int dst_bpp, - int src_x, - int src_y, - int dst_x, - int dst_y, - int width, - int height) -{ - if (!pixman_blt_sse2 ( - src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp, - src_x, src_y, dst_x, dst_y, width, height)) - - { - return _pixman_implementation_blt ( - imp->delegate, - src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp, - src_x, src_y, dst_x, dst_y, width, height); - } - - return TRUE; -} - -#if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__) -__attribute__((__force_align_arg_pointer__)) -#endif -static pixman_bool_t -sse2_fill (pixman_implementation_t *imp, - uint32_t * bits, - int stride, - int bpp, - int x, - int y, - int width, - int height, - uint32_t xor) -{ - if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor)) - { - return _pixman_implementation_fill ( - imp->delegate, bits, stride, bpp, x, y, width, height, xor); - } - - return TRUE; -} - -static uint32_t * -sse2_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask) -{ - int w = iter->width; - __m128i ff000000 = mask_ff000000; - uint32_t *dst = iter->buffer; - uint32_t *src = (uint32_t *)iter->bits; - - iter->bits += iter->stride; - - while (w && ((unsigned long)dst) & 0x0f) - { - *dst++ = (*src++) | 0xff000000; - w--; - } - - while (w >= 4) - { - save_128_aligned ( - (__m128i *)dst, _mm_or_si128 ( - load_128_unaligned ((__m128i *)src), ff000000)); - - dst += 4; - src += 4; - w -= 4; - } - - while (w) - { - *dst++ = (*src++) | 0xff000000; - w--; - } - - return iter->buffer; -} - -static uint32_t * -sse2_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask) -{ - int w = iter->width; - uint32_t *dst = iter->buffer; - uint16_t *src = (uint16_t *)iter->bits; - __m128i ff000000 = mask_ff000000; - - iter->bits += iter->stride; - - while (w && ((unsigned long)dst) & 0x0f) - { - uint16_t s = *src++; - - *dst++ = CONVERT_0565_TO_8888 (s); - w--; - } - - while (w >= 8) - { - __m128i lo, hi, s; - - s = _mm_loadu_si128 ((__m128i *)src); - - lo = unpack_565_to_8888 (_mm_unpacklo_epi16 (s, _mm_setzero_si128 ())); - hi = unpack_565_to_8888 (_mm_unpackhi_epi16 (s, _mm_setzero_si128 ())); - - save_128_aligned ((__m128i *)(dst + 0), _mm_or_si128 (lo, ff000000)); - save_128_aligned ((__m128i *)(dst + 4), _mm_or_si128 (hi, ff000000)); - - dst += 8; - src += 8; - w -= 8; - } - - while (w) - { - uint16_t s = *src++; - - *dst++ = CONVERT_0565_TO_8888 (s); - w--; - } - - return iter->buffer; -} - -static uint32_t * -sse2_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask) -{ - int w = iter->width; - uint32_t *dst = iter->buffer; - uint8_t *src = iter->bits; - __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6; - - iter->bits += iter->stride; - - while (w && (((unsigned long)dst) & 15)) - { - *dst++ = *(src++) << 24; - w--; - } - - while (w >= 16) - { - xmm0 = _mm_loadu_si128((__m128i *)src); - - xmm1 = _mm_unpacklo_epi8 (_mm_setzero_si128(), xmm0); - xmm2 = _mm_unpackhi_epi8 (_mm_setzero_si128(), xmm0); - xmm3 = _mm_unpacklo_epi16 (_mm_setzero_si128(), xmm1); - xmm4 = _mm_unpackhi_epi16 (_mm_setzero_si128(), xmm1); - xmm5 = _mm_unpacklo_epi16 (_mm_setzero_si128(), xmm2); - xmm6 = _mm_unpackhi_epi16 
(_mm_setzero_si128(), xmm2); - - _mm_store_si128(((__m128i *)(dst + 0)), xmm3); - _mm_store_si128(((__m128i *)(dst + 4)), xmm4); - _mm_store_si128(((__m128i *)(dst + 8)), xmm5); - _mm_store_si128(((__m128i *)(dst + 12)), xmm6); - - dst += 16; - src += 16; - w -= 16; - } - - while (w) - { - *dst++ = *(src++) << 24; - w--; - } - - return iter->buffer; -} - -typedef struct -{ - pixman_format_code_t format; - pixman_iter_get_scanline_t get_scanline; -} fetcher_info_t; - -static const fetcher_info_t fetchers[] = -{ - { PIXMAN_x8r8g8b8, sse2_fetch_x8r8g8b8 }, - { PIXMAN_r5g6b5, sse2_fetch_r5g6b5 }, - { PIXMAN_a8, sse2_fetch_a8 }, - { PIXMAN_null } -}; - -static void -sse2_src_iter_init (pixman_implementation_t *imp, - pixman_iter_t *iter, - pixman_image_t *image, - int x, int y, int width, int height, - uint8_t *buffer, iter_flags_t flags) -{ -#define FLAGS \ - (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM) - - if ((flags & ITER_NARROW) && - (image->common.flags & FLAGS) == FLAGS && - x >= 0 && y >= 0 && - x + width <= image->bits.width && - y + height <= image->bits.height) - { - const fetcher_info_t *f; - - for (f = &fetchers[0]; f->format != PIXMAN_null; f++) - { - if (image->common.extended_format_code == f->format) - { - uint8_t *b = (uint8_t *)image->bits.bits; - int s = image->bits.rowstride * 4; - - iter->bits = b + s * y + x * PIXMAN_FORMAT_BPP (f->format) / 8; - iter->stride = s; - iter->width = width; - iter->buffer = (uint32_t *)buffer; - - iter->get_scanline = f->get_scanline; - return; - } - } - } - - _pixman_implementation_src_iter_init ( - imp->delegate, iter, image, x, y, width, height, buffer, flags); -} - -#if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__) -__attribute__((__force_align_arg_pointer__)) -#endif -pixman_implementation_t * -_pixman_implementation_create_sse2 (pixman_implementation_t *fallback) -{ - pixman_implementation_t *imp = _pixman_implementation_create (fallback, sse2_fast_paths); - - /* SSE2 constants */ - mask_565_r = create_mask_2x32_128 (0x00f80000, 0x00f80000); - mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000); - mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0); - mask_565_b = create_mask_2x32_128 (0x0000001f, 0x0000001f); - mask_red = create_mask_2x32_128 (0x00f80000, 0x00f80000); - mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00); - mask_blue = create_mask_2x32_128 (0x000000f8, 0x000000f8); - mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0); - mask_565_fix_g = create_mask_2x32_128 (0x0000c000, 0x0000c000); - mask_0080 = create_mask_16_128 (0x0080); - mask_00ff = create_mask_16_128 (0x00ff); - mask_0101 = create_mask_16_128 (0x0101); - mask_ffff = create_mask_16_128 (0xffff); - mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000); - mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000); - - /* MMX constants */ - mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f); - mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840); - - mask_x0080 = create_mask_16_64 (0x0080); - mask_x00ff = create_mask_16_64 (0x00ff); - mask_x0101 = create_mask_16_64 (0x0101); - mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000); - - _mm_empty (); - - /* Set up function pointers */ - - /* SSE code patch for fbcompose.c */ - imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u; - imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u; - imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u; - imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u; 
- imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u; - imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u; - imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u; - imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u; - imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u; - imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u; - - imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u; - - imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca; - imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca; - imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca; - imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca; - imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca; - imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca; - imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca; - imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca; - imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca; - imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca; - imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca; - - imp->blt = sse2_blt; - imp->fill = sse2_fill; - - imp->src_iter_init = sse2_src_iter_init; - - return imp; -} - -#endif /* USE_SSE2 */ +/*
+ * Copyright © 2008 Rodrigo Kumpera
+ * Copyright © 2008 André Tupinambá
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Red Hat not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. Red Hat makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author: Rodrigo Kumpera (kumpera@gmail.com)
+ * André Tupinambá (andrelrt@gmail.com)
+ *
+ * Based on work by Owen Taylor and Søren Sandmann
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <mmintrin.h>
+#include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
+#include <emmintrin.h> /* for SSE2 intrinsics */
+#include "pixman-private.h"
+#include "pixman-combine32.h"
+#include "pixman-fast-path.h"
+
+#if defined(_MSC_VER) && defined(_M_AMD64)
+/* Windows 64 doesn't allow MMX to be used, so
+ * the pixman-x64-mmx-emulation.h file contains
+ * implementations of those MMX intrinsics that
+ * are used in the SSE2 implementation.
+ */
+# include "pixman-x64-mmx-emulation.h"
+#endif
+
+#ifdef USE_SSE2
+
+/* --------------------------------------------------------------------
+ * Locals
+ */
+
+static __m64 mask_x0080;
+static __m64 mask_x00ff;
+static __m64 mask_x0101;
+static __m64 mask_x_alpha;
+
+static __m64 mask_x565_rgb;
+static __m64 mask_x565_unpack;
+
+static __m128i mask_0080;
+static __m128i mask_00ff;
+static __m128i mask_0101;
+static __m128i mask_ffff;
+static __m128i mask_ff000000;
+static __m128i mask_alpha;
+
+static __m128i mask_565_r;
+static __m128i mask_565_g1, mask_565_g2;
+static __m128i mask_565_b;
+static __m128i mask_red;
+static __m128i mask_green;
+static __m128i mask_blue;
+
+static __m128i mask_565_fix_rb;
+static __m128i mask_565_fix_g;
+
+/* ----------------------------------------------------------------------
+ * SSE2 Inlines
+ */
+static force_inline __m128i
+unpack_32_1x128 (uint32_t data)
+{
+ return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128 ());
+}
+
+static force_inline void
+unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
+{
+ *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
+ *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
+}
+
+static force_inline __m128i
+unpack_565_to_8888 (__m128i lo)
+{
+ __m128i r, g, b, rb, t;
+
+ r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red);
+ g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green);
+ b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue);
+
+ rb = _mm_or_si128 (r, b);
+ t = _mm_and_si128 (rb, mask_565_fix_rb);
+ t = _mm_srli_epi32 (t, 5);
+ rb = _mm_or_si128 (rb, t);
+
+ t = _mm_and_si128 (g, mask_565_fix_g);
+ t = _mm_srli_epi32 (t, 6);
+ g = _mm_or_si128 (g, t);
+
+ return _mm_or_si128 (rb, g);
+}
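+
+/* Scalar reference for the conversion above (an illustrative sketch only;
+ * the helper name is made up and not part of the original patch): each 565
+ * channel is widened to 8 bits by replicating its top bits into the freed
+ * low bits, which is what the mask_565_fix_rb / mask_565_fix_g corrections
+ * do for four pixels at once.  Like unpack_565_to_8888, it leaves the alpha
+ * byte zero.
+ */
+static force_inline uint32_t
+scalar_565_to_8888 (uint16_t p)
+{
+ uint32_t r = (p >> 11) & 0x1f;
+ uint32_t g = (p >> 5) & 0x3f;
+ uint32_t b = p & 0x1f;
+
+ r = (r << 3) | (r >> 2);
+ g = (g << 2) | (g >> 4);
+ b = (b << 3) | (b >> 2);
+
+ return (r << 16) | (g << 8) | b;
+}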
+
+static force_inline void
+unpack_565_128_4x128 (__m128i data,
+ __m128i* data0,
+ __m128i* data1,
+ __m128i* data2,
+ __m128i* data3)
+{
+ __m128i lo, hi;
+
+ lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
+ hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
+
+ lo = unpack_565_to_8888 (lo);
+ hi = unpack_565_to_8888 (hi);
+
+ unpack_128_2x128 (lo, data0, data1);
+ unpack_128_2x128 (hi, data2, data3);
+}
+
+static force_inline uint16_t
+pack_565_32_16 (uint32_t pixel)
+{
+ return (uint16_t) (((pixel >> 8) & 0xf800) |
+ ((pixel >> 5) & 0x07e0) |
+ ((pixel >> 3) & 0x001f));
+}
+
+static force_inline __m128i
+pack_2x128_128 (__m128i lo, __m128i hi)
+{
+ return _mm_packus_epi16 (lo, hi);
+}
+
+static force_inline __m128i
+pack_565_2x128_128 (__m128i lo, __m128i hi)
+{
+ __m128i data;
+ __m128i r, g1, g2, b;
+
+ data = pack_2x128_128 (lo, hi);
+
+ r = _mm_and_si128 (data, mask_565_r);
+ g1 = _mm_and_si128 (_mm_slli_epi32 (data, 3), mask_565_g1);
+ g2 = _mm_and_si128 (_mm_srli_epi32 (data, 5), mask_565_g2);
+ b = _mm_and_si128 (_mm_srli_epi32 (data, 3), mask_565_b);
+
+ return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
+}
+
+static force_inline __m128i
+pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
+{
+ return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1),
+ pack_565_2x128_128 (*xmm2, *xmm3));
+}
+
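+/* The three predicates below only look at the alpha bytes: _mm_movemask_epi8
+ * collects the top bit of each of the 16 bytes, and with four ARGB pixels in
+ * a register the alpha bytes are bytes 3, 7, 11 and 15, i.e. exactly the
+ * bits selected by 0x8888.
+ */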
+static force_inline int
+is_opaque (__m128i x)
+{
+ __m128i ffs = _mm_cmpeq_epi8 (x, x);
+
+ return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
+}
+
+static force_inline int
+is_zero (__m128i x)
+{
+ return _mm_movemask_epi8 (
+ _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) == 0xffff;
+}
+
+static force_inline int
+is_transparent (__m128i x)
+{
+ return (_mm_movemask_epi8 (
+ _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) & 0x8888) == 0x8888;
+}
+
+static force_inline __m128i
+expand_pixel_32_1x128 (uint32_t data)
+{
+ return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE (1, 0, 1, 0));
+}
+
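+/* The expand_alpha_* helpers broadcast the 16-bit alpha word of each
+ * unpacked pixel (word 3 of each 64-bit half, selected with
+ * _MM_SHUFFLE (3, 3, 3, 3)) to all four channel words.  The _rev variants
+ * broadcast word 0 instead, for values whose alpha was loaded into the
+ * lowest word.
+ */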
+static force_inline __m128i
+expand_alpha_1x128 (__m128i data)
+{
+ return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data,
+ _MM_SHUFFLE (3, 3, 3, 3)),
+ _MM_SHUFFLE (3, 3, 3, 3));
+}
+
+static force_inline void
+expand_alpha_2x128 (__m128i data_lo,
+ __m128i data_hi,
+ __m128i* alpha_lo,
+ __m128i* alpha_hi)
+{
+ __m128i lo, hi;
+
+ lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 3, 3, 3));
+ hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 3, 3, 3));
+
+ *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 3, 3, 3));
+ *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 3, 3, 3));
+}
+
+static force_inline void
+expand_alpha_rev_2x128 (__m128i data_lo,
+ __m128i data_hi,
+ __m128i* alpha_lo,
+ __m128i* alpha_hi)
+{
+ __m128i lo, hi;
+
+ lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (0, 0, 0, 0));
+ hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (0, 0, 0, 0));
+ *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (0, 0, 0, 0));
+ *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (0, 0, 0, 0));
+}
+
+static force_inline void
+pix_multiply_2x128 (__m128i* data_lo,
+ __m128i* data_hi,
+ __m128i* alpha_lo,
+ __m128i* alpha_hi,
+ __m128i* ret_lo,
+ __m128i* ret_hi)
+{
+ __m128i lo, hi;
+
+ lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
+ hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
+ lo = _mm_adds_epu16 (lo, mask_0080);
+ hi = _mm_adds_epu16 (hi, mask_0080);
+ *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
+ *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
+}
+
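+/* Scalar equivalent of the multiply above (an illustrative sketch only; the
+ * helper name is made up).  Adding 0x80 and taking the high 16 bits of the
+ * product with 0x0101 is the exact, correctly rounded divide-by-255:
+ * with t = x * a + 0x80, (t * 0x0101) >> 16 == (t + (t >> 8)) >> 8.
+ */
+static force_inline uint8_t
+scalar_mul_un8 (uint8_t x, uint8_t a)
+{
+ uint32_t t = (uint32_t) x * a + 0x80;
+
+ return (uint8_t) ((t + (t >> 8)) >> 8);
+}
+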
+static force_inline void
+pix_add_multiply_2x128 (__m128i* src_lo,
+ __m128i* src_hi,
+ __m128i* alpha_dst_lo,
+ __m128i* alpha_dst_hi,
+ __m128i* dst_lo,
+ __m128i* dst_hi,
+ __m128i* alpha_src_lo,
+ __m128i* alpha_src_hi,
+ __m128i* ret_lo,
+ __m128i* ret_hi)
+{
+ __m128i t1_lo, t1_hi;
+ __m128i t2_lo, t2_hi;
+
+ pix_multiply_2x128 (src_lo, src_hi, alpha_dst_lo, alpha_dst_hi, &t1_lo, &t1_hi);
+ pix_multiply_2x128 (dst_lo, dst_hi, alpha_src_lo, alpha_src_hi, &t2_lo, &t2_hi);
+
+ *ret_lo = _mm_adds_epu8 (t1_lo, t2_lo);
+ *ret_hi = _mm_adds_epu8 (t1_hi, t2_hi);
+}
+
+static force_inline void
+negate_2x128 (__m128i data_lo,
+ __m128i data_hi,
+ __m128i* neg_lo,
+ __m128i* neg_hi)
+{
+ *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
+ *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
+}
+
+static force_inline void
+invert_colors_2x128 (__m128i data_lo,
+ __m128i data_hi,
+ __m128i* inv_lo,
+ __m128i* inv_hi)
+{
+ __m128i lo, hi;
+
+ lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 0, 1, 2));
+ hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 0, 1, 2));
+ *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 0, 1, 2));
+ *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 0, 1, 2));
+}
+
+static force_inline void
+over_2x128 (__m128i* src_lo,
+ __m128i* src_hi,
+ __m128i* alpha_lo,
+ __m128i* alpha_hi,
+ __m128i* dst_lo,
+ __m128i* dst_hi)
+{
+ __m128i t1, t2;
+
+ negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);
+
+ pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);
+
+ *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
+ *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
+}
+
+static force_inline void
+over_rev_non_pre_2x128 (__m128i src_lo,
+ __m128i src_hi,
+ __m128i* dst_lo,
+ __m128i* dst_hi)
+{
+ __m128i lo, hi;
+ __m128i alpha_lo, alpha_hi;
+
+ expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);
+
+ lo = _mm_or_si128 (alpha_lo, mask_alpha);
+ hi = _mm_or_si128 (alpha_hi, mask_alpha);
+
+ invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
+
+ pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);
+
+ over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
+}
+
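+/* in_over computes (src IN mask) OVER dest: the source and its alpha are
+ * first multiplied per channel by the mask, and the result is then
+ * composited onto the destination with the ordinary OVER operator.
+ */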
+static force_inline void
+in_over_2x128 (__m128i* src_lo,
+ __m128i* src_hi,
+ __m128i* alpha_lo,
+ __m128i* alpha_hi,
+ __m128i* mask_lo,
+ __m128i* mask_hi,
+ __m128i* dst_lo,
+ __m128i* dst_hi)
+{
+ __m128i s_lo, s_hi;
+ __m128i a_lo, a_hi;
+
+ pix_multiply_2x128 (src_lo, src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
+ pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);
+
+ over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
+}
+
+/* load 4 pixels from a 16-byte-aligned address */
+static force_inline __m128i
+load_128_aligned (__m128i* src)
+{
+ return _mm_load_si128 (src);
+}
+
+/* load 4 pixels from an unaligned address */
+static force_inline __m128i
+load_128_unaligned (const __m128i* src)
+{
+ return _mm_loadu_si128 (src);
+}
+
+/* save 4 pixels to a 16-byte-aligned address using a non-temporal
+ * (write-combining) store
+ */
+static force_inline void
+save_128_write_combining (__m128i* dst,
+ __m128i data)
+{
+ _mm_stream_si128 (dst, data);
+}
+
+/* save 4 pixels to a 16-byte-aligned address */
+static force_inline void
+save_128_aligned (__m128i* dst,
+ __m128i data)
+{
+ _mm_store_si128 (dst, data);
+}
+
+/* save 4 pixels to an unaligned address */
+static force_inline void
+save_128_unaligned (__m128i* dst,
+ __m128i data)
+{
+ _mm_storeu_si128 (dst, data);
+}
+
+/* ------------------------------------------------------------------
+ * MMX inlines
+ */
+
+static force_inline __m64
+load_32_1x64 (uint32_t data)
+{
+ return _mm_cvtsi32_si64 (data);
+}
+
+static force_inline __m64
+unpack_32_1x64 (uint32_t data)
+{
+ return _mm_unpacklo_pi8 (load_32_1x64 (data), _mm_setzero_si64 ());
+}
+
+static force_inline __m64
+expand_alpha_1x64 (__m64 data)
+{
+ return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 3, 3, 3));
+}
+
+static force_inline __m64
+expand_alpha_rev_1x64 (__m64 data)
+{
+ return _mm_shuffle_pi16 (data, _MM_SHUFFLE (0, 0, 0, 0));
+}
+
+static force_inline __m64
+expand_pixel_8_1x64 (uint8_t data)
+{
+ return _mm_shuffle_pi16 (
+ unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE (0, 0, 0, 0));
+}
+
+static force_inline __m64
+pix_multiply_1x64 (__m64 data,
+ __m64 alpha)
+{
+ return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
+ mask_x0080),
+ mask_x0101);
+}
+
+static force_inline __m64
+pix_add_multiply_1x64 (__m64* src,
+ __m64* alpha_dst,
+ __m64* dst,
+ __m64* alpha_src)
+{
+ __m64 t1 = pix_multiply_1x64 (*src, *alpha_dst);
+ __m64 t2 = pix_multiply_1x64 (*dst, *alpha_src);
+
+ return _mm_adds_pu8 (t1, t2);
+}
+
+static force_inline __m64
+negate_1x64 (__m64 data)
+{
+ return _mm_xor_si64 (data, mask_x00ff);
+}
+
+static force_inline __m64
+invert_colors_1x64 (__m64 data)
+{
+ return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 0, 1, 2));
+}
+
+static force_inline __m64
+over_1x64 (__m64 src, __m64 alpha, __m64 dst)
+{
+ return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha)));
+}
+
+static force_inline __m64
+in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
+{
+ return over_1x64 (pix_multiply_1x64 (*src, *mask),
+ pix_multiply_1x64 (*alpha, *mask),
+ *dst);
+}
+
+static force_inline __m64
+over_rev_non_pre_1x64 (__m64 src, __m64 dst)
+{
+ __m64 alpha = expand_alpha_1x64 (src);
+
+ return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src),
+ _mm_or_si64 (alpha, mask_x_alpha)),
+ alpha,
+ dst);
+}
+
+static force_inline uint32_t
+pack_1x64_32 (__m64 data)
+{
+ return _mm_cvtsi64_si32 (_mm_packs_pu16 (data, _mm_setzero_si64 ()));
+}
+
+/* Expand 16 bits positioned at @pos (0-3) of an MMX register into
+ *
+ * 00RR00GG00BB
+ *
+ * --- Expanding 565 in the low word ---
+ *
+ * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
+ * m = m & (01f0003f001f);
+ * m = m * (008404100840);
+ * m = m >> 8;
+ *
+ * Note the trick here - the top word is shifted by another nibble to
+ * avoid it bumping into the middle word
+ */
+static force_inline __m64
+expand565_16_1x64 (uint16_t pixel)
+{
+ __m64 p;
+ __m64 t1, t2;
+
+ p = _mm_cvtsi32_si64 ((uint32_t) pixel);
+
+ t1 = _mm_slli_si64 (p, 36 - 11);
+ t2 = _mm_slli_si64 (p, 16 - 5);
+
+ p = _mm_or_si64 (t1, p);
+ p = _mm_or_si64 (t2, p);
+ p = _mm_and_si64 (p, mask_x565_rgb);
+ p = _mm_mullo_pi16 (p, mask_x565_unpack);
+
+ return _mm_srli_pi16 (p, 8);
+}
+
+/* ----------------------------------------------------------------------------
+ * Compose Core transformations
+ */
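+/* Reference formula for the unified OVER operator used throughout this
+ * file: with premultiplied pixels, each channel is computed as
+ *
+ *     dst = src + dst * (255 - src_alpha) / 255
+ */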
+static force_inline uint32_t
+core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
+{
+ uint8_t a;
+ __m64 ms;
+
+ a = src >> 24;
+
+ if (a == 0xff)
+ {
+ return src;
+ }
+ else if (src)
+ {
+ ms = unpack_32_1x64 (src);
+ return pack_1x64_32 (
+ over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst)));
+ }
+
+ return dst;
+}
+
+static force_inline uint32_t
+combine1 (const uint32_t *ps, const uint32_t *pm)
+{
+ uint32_t s = *ps;
+
+ if (pm)
+ {
+ __m64 ms, mm;
+
+ mm = unpack_32_1x64 (*pm);
+ mm = expand_alpha_1x64 (mm);
+
+ ms = unpack_32_1x64 (s);
+ ms = pix_multiply_1x64 (ms, mm);
+
+ s = pack_1x64_32 (ms);
+ }
+
+ return s;
+}
+
+static force_inline __m128i
+combine4 (const __m128i *ps, const __m128i *pm)
+{
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_msk_lo, xmm_msk_hi;
+ __m128i s;
+
+ if (pm)
+ {
+ xmm_msk_lo = load_128_unaligned (pm);
+
+ if (is_transparent (xmm_msk_lo))
+ return _mm_setzero_si128 ();
+ }
+
+ s = load_128_unaligned (ps);
+
+ if (pm)
+ {
+ unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi);
+
+ expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_msk_lo, &xmm_msk_hi,
+ &xmm_src_lo, &xmm_src_hi);
+
+ s = pack_2x128_128 (xmm_src_lo, xmm_src_hi);
+ }
+
+ return s;
+}
+
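+/* Most scanline combiners below share the same structure: a scalar head
+ * loop that runs until the destination pointer is 16-byte aligned, a main
+ * loop that handles four pixels per iteration with aligned stores, and a
+ * scalar tail loop for the remaining (at most three) pixels.
+ */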
+static force_inline void
+core_combine_over_u_sse2_mask (uint32_t * pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ uint32_t s, d;
+
+ /* Align dst on a 16-byte boundary */
+ while (w && ((unsigned long)pd & 15))
+ {
+ d = *pd;
+ s = combine1 (ps, pm);
+
+ if (s)
+ *pd = core_combine_over_u_pixel_sse2 (s, d);
+ pd++;
+ ps++;
+ pm++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ __m128i mask = load_128_unaligned ((__m128i *)pm);
+
+ if (!is_zero (mask))
+ {
+ __m128i src;
+ __m128i src_hi, src_lo;
+ __m128i mask_hi, mask_lo;
+ __m128i alpha_hi, alpha_lo;
+
+ src = load_128_unaligned ((__m128i *)ps);
+
+ if (is_opaque (_mm_and_si128 (src, mask)))
+ {
+ save_128_aligned ((__m128i *)pd, src);
+ }
+ else
+ {
+ __m128i dst = load_128_aligned ((__m128i *)pd);
+ __m128i dst_hi, dst_lo;
+
+ unpack_128_2x128 (mask, &mask_lo, &mask_hi);
+ unpack_128_2x128 (src, &src_lo, &src_hi);
+
+ expand_alpha_2x128 (mask_lo, mask_hi, &mask_lo, &mask_hi);
+ pix_multiply_2x128 (&src_lo, &src_hi,
+ &mask_lo, &mask_hi,
+ &src_lo, &src_hi);
+
+ unpack_128_2x128 (dst, &dst_lo, &dst_hi);
+
+ expand_alpha_2x128 (src_lo, src_hi,
+ &alpha_lo, &alpha_hi);
+
+ over_2x128 (&src_lo, &src_hi, &alpha_lo, &alpha_hi,
+ &dst_lo, &dst_hi);
+
+ save_128_aligned (
+ (__m128i *)pd,
+ pack_2x128_128 (dst_lo, dst_hi));
+ }
+ }
+
+ pm += 4;
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ }
+ while (w)
+ {
+ d = *pd;
+ s = combine1 (ps, pm);
+
+ if (s)
+ *pd = core_combine_over_u_pixel_sse2 (s, d);
+ pd++;
+ ps++;
+ pm++;
+
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_over_u_sse2_no_mask (uint32_t * pd,
+ const uint32_t* ps,
+ int w)
+{
+ uint32_t s, d;
+
+ /* Align dst on a 16-byte boundary */
+ while (w && ((unsigned long)pd & 15))
+ {
+ d = *pd;
+ s = *ps;
+
+ if (s)
+ *pd = core_combine_over_u_pixel_sse2 (s, d);
+ pd++;
+ ps++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ __m128i src;
+ __m128i src_hi, src_lo, dst_hi, dst_lo;
+ __m128i alpha_hi, alpha_lo;
+
+ src = load_128_unaligned ((__m128i *)ps);
+
+ if (!is_zero (src))
+ {
+ if (is_opaque (src))
+ {
+ save_128_aligned ((__m128i *)pd, src);
+ }
+ else
+ {
+ __m128i dst = load_128_aligned ((__m128i *)pd);
+
+ unpack_128_2x128 (src, &src_lo, &src_hi);
+ unpack_128_2x128 (dst, &dst_lo, &dst_hi);
+
+ expand_alpha_2x128 (src_lo, src_hi,
+ &alpha_lo, &alpha_hi);
+ over_2x128 (&src_lo, &src_hi, &alpha_lo, &alpha_hi,
+ &dst_lo, &dst_hi);
+
+ save_128_aligned (
+ (__m128i *)pd,
+ pack_2x128_128 (dst_lo, dst_hi));
+ }
+ }
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ }
+ while (w)
+ {
+ d = *pd;
+ s = *ps;
+
+ if (s)
+ *pd = core_combine_over_u_pixel_sse2 (s, d);
+ pd++;
+ ps++;
+
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_over_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ if (pm)
+ core_combine_over_u_sse2_mask (pd, ps, pm, w);
+ else
+ core_combine_over_u_sse2_no_mask (pd, ps, w);
+}
+
+static force_inline void
+core_combine_over_reverse_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ uint32_t s, d;
+
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+
+ /* Align dst on a 16-byte boundary */
+ while (w &&
+ ((unsigned long)pd & 15))
+ {
+ d = *pd;
+ s = combine1 (ps, pm);
+
+ *pd++ = core_combine_over_u_pixel_sse2 (d, s);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ /* I'm loading unaligned because I'm not sure
+ * about the address alignment.
+ */
+ xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_src_lo, &xmm_src_hi);
+
+ /* rebuild the 4 pixel data and save */
+ save_128_aligned ((__m128i*)pd,
+ pack_2x128_128 (xmm_src_lo, xmm_src_hi));
+
+ w -= 4;
+ ps += 4;
+ pd += 4;
+
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ d = *pd;
+ s = combine1 (ps, pm);
+
+ *pd++ = core_combine_over_u_pixel_sse2 (d, s);
+ ps++;
+ w--;
+ if (pm)
+ pm++;
+ }
+}
+
+static force_inline uint32_t
+core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst)
+{
+ uint32_t maska = src >> 24;
+
+ if (maska == 0)
+ {
+ return 0;
+ }
+ else if (maska != 0xff)
+ {
+ return pack_1x64_32 (
+ pix_multiply_1x64 (unpack_32_1x64 (dst),
+ expand_alpha_1x64 (unpack_32_1x64 (src))));
+ }
+
+ return dst;
+}
+
+static force_inline void
+core_combine_in_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ uint32_t s, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+
+ while (w && ((unsigned long) pd & 15))
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_in_u_pixelsse2 (d, s);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+ xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned ((__m128i*)pd,
+ pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_in_u_pixelsse2 (d, s);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+}
+
+static force_inline void
+core_combine_reverse_in_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+
+ while (w && ((unsigned long) pd & 15))
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_in_u_pixelsse2 (s, d);
+ ps++;
+ w--;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+ xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_src_lo, &xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_in_u_pixelsse2 (s, d);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+}
+
+static force_inline void
+core_combine_reverse_out_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ while (w && ((unsigned long) pd & 15))
+ {
+ uint32_t s = combine1 (ps, pm);
+ uint32_t d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (d), negate_1x64 (
+ expand_alpha_1x64 (unpack_32_1x64 (s)))));
+
+ if (pm)
+ pm++;
+ ps++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+
+ xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ negate_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+
+ pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_src_lo, &xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ if (pm)
+ pm += 4;
+
+ w -= 4;
+ }
+
+ while (w)
+ {
+ uint32_t s = combine1 (ps, pm);
+ uint32_t d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (d), negate_1x64 (
+ expand_alpha_1x64 (unpack_32_1x64 (s)))));
+ ps++;
+ if (pm)
+ pm++;
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_out_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ while (w && ((unsigned long) pd & 15))
+ {
+ uint32_t s = combine1 (ps, pm);
+ uint32_t d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (s), negate_1x64 (
+ expand_alpha_1x64 (unpack_32_1x64 (d)))));
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+
+ xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ negate_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ uint32_t s = combine1 (ps, pm);
+ uint32_t d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (s), negate_1x64 (
+ expand_alpha_1x64 (unpack_32_1x64 (d)))));
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+}
+
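+/* ATOP with premultiplied pixels:
+ *
+ *     dst = (src * dst_alpha + dst * (255 - src_alpha)) / 255
+ *
+ * The reverse variant below swaps which alpha is complemented.
+ */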
+static force_inline uint32_t
+core_combine_atop_u_pixel_sse2 (uint32_t src,
+ uint32_t dst)
+{
+ __m64 s = unpack_32_1x64 (src);
+ __m64 d = unpack_32_1x64 (dst);
+
+ __m64 sa = negate_1x64 (expand_alpha_1x64 (s));
+ __m64 da = expand_alpha_1x64 (d);
+
+ return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
+}
+
+static force_inline void
+core_combine_atop_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ uint32_t s, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+ __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+
+ while (w && ((unsigned long) pd & 15))
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+
+ pix_add_multiply_2x128 (
+ &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+}
+
+static force_inline uint32_t
+core_combine_reverse_atop_u_pixel_sse2 (uint32_t src,
+ uint32_t dst)
+{
+ __m64 s = unpack_32_1x64 (src);
+ __m64 d = unpack_32_1x64 (dst);
+
+ __m64 sa = expand_alpha_1x64 (s);
+ __m64 da = negate_1x64 (expand_alpha_1x64 (d));
+
+ return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
+}
+
+static force_inline void
+core_combine_reverse_atop_u_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t* pm,
+ int w)
+{
+ uint32_t s, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+ __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+
+ while (w && ((unsigned long) pd & 15))
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
+ ps++;
+ w--;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ pix_add_multiply_2x128 (
+ &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
+ ps++;
+ w--;
+ if (pm)
+ pm++;
+ }
+}
+
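+/* XOR with premultiplied pixels:
+ *
+ *     dst = (src * (255 - dst_alpha) + dst * (255 - src_alpha)) / 255
+ */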
+static force_inline uint32_t
+core_combine_xor_u_pixel_sse2 (uint32_t src,
+ uint32_t dst)
+{
+ __m64 s = unpack_32_1x64 (src);
+ __m64 d = unpack_32_1x64 (dst);
+
+ __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d));
+ __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s));
+
+ return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s));
+}
+
+static force_inline void
+core_combine_xor_u_sse2 (uint32_t* dst,
+ const uint32_t* src,
+ const uint32_t *mask,
+ int width)
+{
+ int w = width;
+ uint32_t s, d;
+ uint32_t* pd = dst;
+ const uint32_t* ps = src;
+ const uint32_t* pm = mask;
+
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+ __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+
+ while (w && ((unsigned long) pd & 15))
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
+ xmm_dst = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ pix_add_multiply_2x128 (
+ &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ w -= 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+}
+
+static force_inline void
+core_combine_add_u_sse2 (uint32_t* dst,
+ const uint32_t* src,
+ const uint32_t* mask,
+ int width)
+{
+ int w = width;
+ uint32_t s, d;
+ uint32_t* pd = dst;
+ const uint32_t* ps = src;
+ const uint32_t* pm = mask;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ ps++;
+ if (pm)
+ pm++;
+ *pd++ = _mm_cvtsi64_si32 (
+ _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ __m128i s;
+
+ s = combine4 ((__m128i*)ps, (__m128i*)pm);
+
+ save_128_aligned (
+ (__m128i*)pd, _mm_adds_epu8 (s, load_128_aligned ((__m128i*)pd)));
+
+ pd += 4;
+ ps += 4;
+ if (pm)
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w--)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ ps++;
+ *pd++ = _mm_cvtsi64_si32 (
+ _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
+ if (pm)
+ pm++;
+ }
+}
+
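+/* SATURATE: when the source alpha exceeds the free alpha of the destination
+ * (255 - dst_alpha), the source is first scaled by (255 - dst_alpha) /
+ * src_alpha so that the saturating add cannot overflow the result alpha.
+ */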
+static force_inline uint32_t
+core_combine_saturate_u_pixel_sse2 (uint32_t src,
+ uint32_t dst)
+{
+ __m64 ms = unpack_32_1x64 (src);
+ __m64 md = unpack_32_1x64 (dst);
+ uint32_t sa = src >> 24;
+ uint32_t da = ~dst >> 24;
+
+ if (sa > da)
+ {
+ ms = pix_multiply_1x64 (
+ ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8 (da, sa) << 24)));
+ }
+
+ return pack_1x64_32 (_mm_adds_pu16 (md, ms));
+}
+
+static force_inline void
+core_combine_saturate_u_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, d;
+
+ uint32_t pack_cmp;
+ __m128i xmm_src, xmm_dst;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
+ w--;
+ ps++;
+ if (pm)
+ pm++;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)pd);
+ xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
+
+ pack_cmp = _mm_movemask_epi8 (
+ _mm_cmpgt_epi32 (
+ _mm_srli_epi32 (xmm_src, 24),
+ _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));
+
+ /* if some src alpha is greater than the respective ~dst alpha */
+ if (pack_cmp)
+ {
+ s = combine1 (ps++, pm);
+ d = *pd;
+ *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
+ if (pm)
+ pm++;
+
+ s = combine1 (ps++, pm);
+ d = *pd;
+ *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
+ if (pm)
+ pm++;
+
+ s = combine1 (ps++, pm);
+ d = *pd;
+ *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
+ if (pm)
+ pm++;
+
+ s = combine1 (ps++, pm);
+ d = *pd;
+ *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
+ if (pm)
+ pm++;
+ }
+ else
+ {
+ save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src));
+
+ pd += 4;
+ ps += 4;
+ if (pm)
+ pm += 4;
+ }
+
+ w -= 4;
+ }
+
+ while (w--)
+ {
+ s = combine1 (ps, pm);
+ d = *pd;
+
+ *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
+ ps++;
+ if (pm)
+ pm++;
+ }
+}
+
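+/* The _ca (component alpha) combiners below use the full 32-bit mask per
+ * channel: each source channel is multiplied by the corresponding mask
+ * channel, rather than by the single mask alpha used in the unified (_u)
+ * variants above.
+ */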
+static force_inline void
+core_combine_src_ca_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
+ w--;
+ }
+}
+
+static force_inline uint32_t
+core_combine_over_ca_pixel_sse2 (uint32_t src,
+ uint32_t mask,
+ uint32_t dst)
+{
+ __m64 s = unpack_32_1x64 (src);
+ __m64 expAlpha = expand_alpha_1x64 (s);
+ __m64 unpk_mask = unpack_32_1x64 (mask);
+ __m64 unpk_dst = unpack_32_1x64 (dst);
+
+ return pack_1x64_32 (in_over_1x64 (&s, &expAlpha, &unpk_mask, &unpk_dst));
+}
+
+static force_inline void
+core_combine_over_ca_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+}
+
+static force_inline uint32_t
+core_combine_over_reverse_ca_pixel_sse2 (uint32_t src,
+ uint32_t mask,
+ uint32_t dst)
+{
+ __m64 d = unpack_32_1x64 (dst);
+
+ return pack_1x64_32 (
+ over_1x64 (d, expand_alpha_1x64 (d),
+ pix_multiply_1x64 (unpack_32_1x64 (src),
+ unpack_32_1x64 (mask))));
+}
+
+static force_inline void
+core_combine_over_reverse_ca_sse2 (uint32_t* pd,
+ const uint32_t* ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_in_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
+ expand_alpha_1x64 (unpack_32_1x64 (d))));
+
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (s), unpack_32_1x64 (m)),
+ expand_alpha_1x64 (unpack_32_1x64 (d))));
+
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_in_reverse_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (d),
+ pix_multiply_1x64 (unpack_32_1x64 (m),
+ expand_alpha_1x64 (unpack_32_1x64 (s)))));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+ pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (d),
+ pix_multiply_1x64 (unpack_32_1x64 (m),
+ expand_alpha_1x64 (unpack_32_1x64 (s)))));
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_out_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (s), unpack_32_1x64 (m)),
+ negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+ negate_2x128 (xmm_alpha_lo, xmm_alpha_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+ pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (s), unpack_32_1x64 (m)),
+ negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
+
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_out_reverse_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (d),
+ negate_1x64 (pix_multiply_1x64 (
+ unpack_32_1x64 (m),
+ expand_alpha_1x64 (unpack_32_1x64 (s))))));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ negate_2x128 (xmm_mask_lo, xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (d),
+ negate_1x64 (pix_multiply_1x64 (
+ unpack_32_1x64 (m),
+ expand_alpha_1x64 (unpack_32_1x64 (s))))));
+ w--;
+ }
+}
+
+static force_inline uint32_t
+core_combine_atop_ca_pixel_sse2 (uint32_t src,
+ uint32_t mask,
+ uint32_t dst)
+{
+ __m64 m = unpack_32_1x64 (mask);
+ __m64 s = unpack_32_1x64 (src);
+ __m64 d = unpack_32_1x64 (dst);
+ __m64 sa = expand_alpha_1x64 (s);
+ __m64 da = expand_alpha_1x64 (d);
+
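+ /* Component-alpha ATOP: result = src.mask.da + dest.(1 - mask.sa);
+ * pix_add_multiply then evaluates d*m + s*da using the operands
+ * computed below. */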
+ s = pix_multiply_1x64 (s, m);
+ m = negate_1x64 (pix_multiply_1x64 (m, sa));
+
+ return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
+}
+
+static force_inline void
+core_combine_atop_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+ __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi);
+ pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_add_multiply_2x128 (
+ &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+}
+
+static force_inline uint32_t
+core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src,
+ uint32_t mask,
+ uint32_t dst)
+{
+ __m64 m = unpack_32_1x64 (mask);
+ __m64 s = unpack_32_1x64 (src);
+ __m64 d = unpack_32_1x64 (dst);
+
+ __m64 da = negate_1x64 (expand_alpha_1x64 (d));
+ __m64 sa = expand_alpha_1x64 (s);
+
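+ /* Component-alpha ATOP_REVERSE: result = src.mask.(1 - da) + dest.(mask.sa). */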
+ s = pix_multiply_1x64 (s, m);
+ m = pix_multiply_1x64 (m, sa);
+
+ return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
+}
+
+static force_inline void
+core_combine_reverse_atop_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+ __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi);
+ pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ pix_add_multiply_2x128 (
+ &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+}
+
+static force_inline uint32_t
+core_combine_xor_ca_pixel_sse2 (uint32_t src,
+ uint32_t mask,
+ uint32_t dst)
+{
+ __m64 a = unpack_32_1x64 (mask);
+ __m64 s = unpack_32_1x64 (src);
+ __m64 d = unpack_32_1x64 (dst);
+
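+ /* Component-alpha XOR: result = src.mask.(1 - da) + dest.(1 - mask.sa).
+ * Note the slightly misleading names below: alpha_dst holds (1 - mask.sa)
+ * and alpha_src holds (1 - da). */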
+ __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 (
+ a, expand_alpha_1x64 (s)));
+ __m64 dest = pix_multiply_1x64 (s, a);
+ __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d));
+
+ return pack_1x64_32 (pix_add_multiply_1x64 (&d,
+ &alpha_dst,
+ &dest,
+ &alpha_src));
+}
+
+static force_inline void
+core_combine_xor_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+ __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi);
+ pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
+ &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
+ &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+ negate_2x128 (xmm_mask_lo, xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_add_multiply_2x128 (
+ &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
+ w--;
+ }
+}
+
+static force_inline void
+core_combine_add_ca_sse2 (uint32_t * pd,
+ const uint32_t *ps,
+ const uint32_t *pm,
+ int w)
+{
+ uint32_t s, m, d;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask_lo, xmm_mask_hi;
+
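+ /* Component-alpha ADD: result = saturate (src.mask + dest), using
+ * saturating unsigned byte additions. */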
+ while (w && (unsigned long)pd & 15)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
+ unpack_32_1x64 (m)),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+ xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+ xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_src_lo, &xmm_src_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (
+ _mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
+ _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));
+
+ ps += 4;
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ s = *ps++;
+ m = *pm++;
+ d = *pd;
+
+ *pd++ = pack_1x64_32 (
+ _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
+ unpack_32_1x64 (m)),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+}
+
+/* ---------------------------------------------------
+ * fb_compose_setup_sse2
+ */
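+
+/* Helpers that broadcast a 16-bit value, or repeat a pair of 32-bit
+ * values, across an MMX/SSE register; they build the constant masks
+ * used by the fast paths below. */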
+static force_inline __m64
+create_mask_16_64 (uint16_t mask)
+{
+ return _mm_set1_pi16 (mask);
+}
+
+static force_inline __m128i
+create_mask_16_128 (uint16_t mask)
+{
+ return _mm_set1_epi16 (mask);
+}
+
+static force_inline __m64
+create_mask_2x32_64 (uint32_t mask0,
+ uint32_t mask1)
+{
+ return _mm_set_pi32 (mask0, mask1);
+}
+
+/* Work around a code generation bug in Sun Studio 12. */
+#if defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
+# define create_mask_2x32_128(mask0, mask1) \
+ (_mm_set_epi32 ((mask0), (mask1), (mask0), (mask1)))
+#else
+static force_inline __m128i
+create_mask_2x32_128 (uint32_t mask0,
+ uint32_t mask1)
+{
+ return _mm_set_epi32 (mask0, mask1, mask0, mask1);
+}
+#endif
+
+/* SSE2 code patch for fbcompose.c */
+
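+/* Thin wrappers adapting the core_combine_* helpers above to the combiner
+ * signature pixman expects; each one calls _mm_empty () so the MMX state
+ * is cleared before any later x87 floating-point use. */
+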
+static void
+sse2_combine_over_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_over_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_over_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_over_reverse_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_in_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_in_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_in_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_reverse_in_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_out_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_out_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_out_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_reverse_out_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_atop_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_atop_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_atop_reverse_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_xor_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_xor_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_add_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_add_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_saturate_u (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_saturate_u_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_src_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_src_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_over_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_over_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_over_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_over_reverse_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_in_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_in_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_in_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_in_reverse_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_out_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_out_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_out_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_out_reverse_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_atop_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_atop_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_atop_reverse_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_reverse_atop_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_xor_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_xor_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+static void
+sse2_combine_add_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ uint32_t * dst,
+ const uint32_t * src,
+ const uint32_t * mask,
+ int width)
+{
+ core_combine_add_ca_sse2 (dst, src, mask, width);
+ _mm_empty ();
+}
+
+/* -------------------------------------------------------------------
+ * composite_over_n_8888
+ */
+
+static void
+sse2_composite_over_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint32_t *dst_line, *dst, d;
+ int32_t w;
+ int dst_stride;
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+
+ xmm_src = expand_pixel_32_1x128 (src);
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+
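+ /* Each scanline is handled in three phases: a scalar loop until dst
+ * reaches 16-byte alignment, a 4-pixel SSE2 loop with aligned stores,
+ * and a scalar loop for the remaining tail pixels. */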
+ while (height--)
+ {
+ dst = dst_line;
+
+ dst_line += dst_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ d = *dst;
+ *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+ _mm_movepi64_pi64 (xmm_alpha),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ /* rebuild the 4 pixel data and save */
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ w -= 4;
+ dst += 4;
+ }
+
+ while (w)
+ {
+ d = *dst;
+ *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+ _mm_movepi64_pi64 (xmm_alpha),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+
+ }
+ _mm_empty ();
+}
+
+/* ---------------------------------------------------------------------
+ * composite_over_n_0565
+ */
+static void
+sse2_composite_over_n_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint16_t *dst_line, *dst, d;
+ int32_t w;
+ int dst_stride;
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+
+ xmm_src = expand_pixel_32_1x128 (src);
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+
+ while (height--)
+ {
+ dst = dst_line;
+
+ dst_line += dst_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ d = *dst;
+
+ *dst++ = pack_565_32_16 (
+ pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+ _mm_movepi64_pi64 (xmm_alpha),
+ expand565_16_1x64 (d))));
+ w--;
+ }
+
+ while (w >= 8)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_565_128_4x128 (xmm_dst,
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+
+ over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_dst0, &xmm_dst1);
+ over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_dst2, &xmm_dst3);
+
+ xmm_dst = pack_565_4x128_128 (
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+
+ save_128_aligned ((__m128i*)dst, xmm_dst);
+
+ dst += 8;
+ w -= 8;
+ }
+
+ while (w--)
+ {
+ d = *dst;
+ *dst++ = pack_565_32_16 (
+ pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+ _mm_movepi64_pi64 (xmm_alpha),
+ expand565_16_1x64 (d))));
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ------------------------------
+ * composite_add_n_8888_8888_ca
+ */
+static void
+sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line, d;
+ uint32_t *mask_line, m;
+ uint32_t pack_cmp;
+ int dst_stride, mask_stride;
+
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_dst;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+ srca = src >> 24;
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ xmm_src = _mm_unpacklo_epi8 (
+ create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+ mmx_src = _mm_movepi64_pi64 (xmm_src);
+ mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
+
+ while (height--)
+ {
+ int w = width;
+ const uint32_t *pm = (uint32_t *)mask_line;
+ uint32_t *pd = (uint32_t *)dst_line;
+
+ dst_line += dst_stride;
+ mask_line += mask_stride;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ m = *pm++;
+
+ if (m)
+ {
+ d = *pd;
+
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *pd = pack_1x64_32 (
+ _mm_adds_pu8 (pix_multiply_1x64 (mmx_mask, mmx_src), mmx_dest));
+ }
+
+ pd++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_mask = load_128_unaligned ((__m128i*)pm);
+
+ pack_cmp =
+ _mm_movemask_epi8 (
+ _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
+
+ /* if all bits in mask are zero, pack_cmp is equal to 0xffff */
+ if (pack_cmp != 0xffff)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)pd);
+
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_multiply_2x128 (&xmm_src, &xmm_src,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+ xmm_mask_hi = pack_2x128_128 (xmm_mask_lo, xmm_mask_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, _mm_adds_epu8 (xmm_mask_hi, xmm_dst));
+ }
+
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ m = *pm++;
+
+ if (m)
+ {
+ d = *pd;
+
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *pd = pack_1x64_32 (
+ _mm_adds_pu8 (pix_multiply_1x64 (mmx_mask, mmx_src), mmx_dest));
+ }
+
+ pd++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ---------------------------------------------------------------------------
+ * composite_over_n_8888_8888_ca
+ */
+
+static void
+sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint32_t *dst_line, d;
+ uint32_t *mask_line, m;
+ uint32_t pack_cmp;
+ int dst_stride, mask_stride;
+
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ xmm_src = _mm_unpacklo_epi8 (
+ create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+ mmx_src = _mm_movepi64_pi64 (xmm_src);
+ mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
+
+ while (height--)
+ {
+ int w = width;
+ const uint32_t *pm = (uint32_t *)mask_line;
+ uint32_t *pd = (uint32_t *)dst_line;
+
+ dst_line += dst_stride;
+ mask_line += mask_stride;
+
+ while (w && (unsigned long)pd & 15)
+ {
+ m = *pm++;
+
+ if (m)
+ {
+ d = *pd;
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *pd = pack_1x64_32 (in_over_1x64 (&mmx_src,
+ &mmx_alpha,
+ &mmx_mask,
+ &mmx_dest));
+ }
+
+ pd++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_mask = load_128_unaligned ((__m128i*)pm);
+
+ pack_cmp =
+ _mm_movemask_epi8 (
+ _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
+
+ /* if all bits in mask are zero, pack_cmp is equal to 0xffff */
+ if (pack_cmp != 0xffff)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)pd);
+
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ in_over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ pd += 4;
+ pm += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ m = *pm++;
+
+ if (m)
+ {
+ d = *pd;
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *pd = pack_1x64_32 (
+ in_over_1x64 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest));
+ }
+
+ pd++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/*---------------------------------------------------------------------
+ * composite_over_8888_n_8888
+ */
+
+static void
+sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ uint32_t mask;
+ int32_t w;
+ int dst_stride, src_stride;
+
+ __m128i xmm_mask;
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ mask = _pixman_image_get_solid (imp, mask_image, PIXMAN_a8r8g8b8);
+
+ xmm_mask = create_mask_16_128 (mask >> 24);
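+ /* Only the alpha byte of the solid mask matters here; it is replicated
+ * into every 16-bit lane so in_over can scale src and its alpha by it. */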
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint32_t s = *src++;
+
+ if (s)
+ {
+ uint32_t d = *dst;
+
+ __m64 ms = unpack_32_1x64 (s);
+ __m64 alpha = expand_alpha_1x64 (ms);
+ __m64 dest = _mm_movepi64_pi64 (xmm_mask);
+ __m64 alpha_dst = unpack_32_1x64 (d);
+
+ *dst = pack_1x64_32 (
+ in_over_1x64 (&ms, &alpha, &dest, &alpha_dst));
+ }
+ dst++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src = load_128_unaligned ((__m128i*)src);
+
+ if (!is_zero (xmm_src))
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_mask, &xmm_mask,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ dst += 4;
+ src += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ uint32_t s = *src++;
+
+ if (s)
+ {
+ uint32_t d = *dst;
+
+ __m64 ms = unpack_32_1x64 (s);
+ __m64 alpha = expand_alpha_1x64 (ms);
+ __m64 mask = _mm_movepi64_pi64 (xmm_mask);
+ __m64 dest = unpack_32_1x64 (d);
+
+ *dst = pack_1x64_32 (
+ in_over_1x64 (&ms, &alpha, &mask, &dest));
+ }
+
+ dst++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/*---------------------------------------------------------------------
+ * composite_src_x888_8888
+ */
+
+static void
+sse2_composite_src_x888_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int32_t w;
+ int dst_stride, src_stride;
+
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
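+ /* x8r8g8b8 -> a8r8g8b8: copy the pixels and force the unused alpha
+ * byte to 0xff (mask_ff000000 in the vector loop below). */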
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ *dst++ = *src++ | 0xff000000;
+ w--;
+ }
+
+ while (w >= 16)
+ {
+ __m128i xmm_src1, xmm_src2, xmm_src3, xmm_src4;
+
+ xmm_src1 = load_128_unaligned ((__m128i*)src + 0);
+ xmm_src2 = load_128_unaligned ((__m128i*)src + 1);
+ xmm_src3 = load_128_unaligned ((__m128i*)src + 2);
+ xmm_src4 = load_128_unaligned ((__m128i*)src + 3);
+
+ save_128_aligned ((__m128i*)dst + 0, _mm_or_si128 (xmm_src1, mask_ff000000));
+ save_128_aligned ((__m128i*)dst + 1, _mm_or_si128 (xmm_src2, mask_ff000000));
+ save_128_aligned ((__m128i*)dst + 2, _mm_or_si128 (xmm_src3, mask_ff000000));
+ save_128_aligned ((__m128i*)dst + 3, _mm_or_si128 (xmm_src4, mask_ff000000));
+
+ dst += 16;
+ src += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ *dst++ = *src++ | 0xff000000;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ---------------------------------------------------------------------
+ * composite_over_x888_n_8888
+ */
+static void
+sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ uint32_t mask;
+ int dst_stride, src_stride;
+ int32_t w;
+
+ __m128i xmm_mask, xmm_alpha;
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ mask = _pixman_image_get_solid (imp, mask_image, PIXMAN_a8r8g8b8);
+
+ xmm_mask = create_mask_16_128 (mask >> 24);
+ xmm_alpha = mask_00ff;
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint32_t s = (*src++) | 0xff000000;
+ uint32_t d = *dst;
+
+ __m64 src = unpack_32_1x64 (s);
+ __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
+ __m64 mask = _mm_movepi64_pi64 (xmm_mask);
+ __m64 dest = unpack_32_1x64 (d);
+
+ *dst++ = pack_1x64_32 (
+ in_over_1x64 (&src, &alpha, &mask, &dest));
+
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src = _mm_or_si128 (
+ load_128_unaligned ((__m128i*)src), mask_ff000000);
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask, &xmm_mask,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ dst += 4;
+ src += 4;
+ w -= 4;
+
+ }
+
+ while (w)
+ {
+ uint32_t s = (*src++) | 0xff000000;
+ uint32_t d = *dst;
+
+ __m64 src = unpack_32_1x64 (s);
+ __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
+ __m64 mask = _mm_movepi64_pi64 (xmm_mask);
+ __m64 dest = unpack_32_1x64 (d);
+
+ *dst++ = pack_1x64_32 (
+ in_over_1x64 (&src, &alpha, &mask, &dest));
+
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* --------------------------------------------------------------------
+ * composite_over_8888_8888
+ */
+static void
+sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ int dst_stride, src_stride;
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ dst = dst_line;
+ src = src_line;
+
+ while (height--)
+ {
+ core_combine_over_u_sse2 (dst, src, NULL, width);
+
+ dst += dst_stride;
+ src += src_stride;
+ }
+ _mm_empty ();
+}
+
+/* ------------------------------------------------------------------
+ * composite_over_8888_0565
+ */
+static force_inline uint16_t
+composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
+{
+ __m64 ms;
+
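+ /* OVER one a8r8g8b8 pixel onto an r5g6b5 pixel: widen the 565
+ * destination to 8888, blend, then pack the result back to 565. */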
+ ms = unpack_32_1x64 (src);
+ return pack_565_32_16 (
+ pack_1x64_32 (
+ over_1x64 (
+ ms, expand_alpha_1x64 (ms), expand565_16_1x64 (dst))));
+}
+
+static void
+sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint16_t *dst_line, *dst, d;
+ uint32_t *src_line, *src, s;
+ int dst_stride, src_stride;
+ int32_t w;
+
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+#if 0
+ /* FIXME
+ *
+ * This code was copied from the MMX version along with its FIXME.
+ * If it is a problem there, it is probably a problem here too.
+ */
+ assert (src_image->drawable == mask_image->drawable);
+#endif
+
+ while (height--)
+ {
+ dst = dst_line;
+ src = src_line;
+
+ dst_line += dst_stride;
+ src_line += src_stride;
+ w = width;
+
+ /* Align dst on a 16-byte boundary */
+ while (w &&
+ ((unsigned long)dst & 15))
+ {
+ s = *src++;
+ d = *dst;
+
+ *dst++ = composite_over_8888_0565pixel (s, d);
+ w--;
+ }
+
+ /* It's an 8-pixel loop */
+ while (w >= 8)
+ {
+ /* The source is loaded unaligned because its address alignment
+ * is not guaranteed.
+ */
+ xmm_src = load_128_unaligned ((__m128i*) src);
+ xmm_dst = load_128_aligned ((__m128i*) dst);
+
+ /* Unpacking */
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_565_128_4x128 (xmm_dst,
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ /* Load the next 4 source pixels early to overlap the memory
+ * read with the blending below.
+ */
+ xmm_src = load_128_unaligned ((__m128i*) (src + 4));
+
+ over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_dst0, &xmm_dst1);
+
+ /* Unpacking */
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_dst2, &xmm_dst3);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_565_4x128_128 (
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
+
+ w -= 8;
+ dst += 8;
+ src += 8;
+ }
+
+ while (w--)
+ {
+ s = *src++;
+ d = *dst;
+
+ *dst++ = composite_over_8888_0565pixel (s, d);
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -----------------------------------------------------------------
+ * composite_over_n_8_8888
+ */
+
+static void
+sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ int32_t w;
+ uint32_t m, d;
+
+ __m128i xmm_src, xmm_alpha, xmm_def;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ xmm_def = create_mask_2x32_128 (src, src);
+ xmm_src = expand_pixel_32_1x128 (src);
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+ mmx_src = _mm_movepi64_pi64 (xmm_src);
+ mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint8_t m = *mask++;
+
+ if (m)
+ {
+ d = *dst;
+ mmx_mask = expand_pixel_8_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
+ &mmx_alpha,
+ &mmx_mask,
+ &mmx_dest));
+ }
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 4)
+ {
+ m = *((uint32_t*)mask);
+
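+ /* A fully opaque source under a fully set mask yields the solid color
+ * itself, so store the pre-expanded value directly. */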
+ if (srca == 0xff && m == 0xffffffff)
+ {
+ save_128_aligned ((__m128i*)dst, xmm_def);
+ }
+ else if (m)
+ {
+ xmm_dst = load_128_aligned ((__m128i*) dst);
+ xmm_mask = unpack_32_1x128 (m);
+ xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
+
+ /* Unpacking */
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ w -= 4;
+ dst += 4;
+ mask += 4;
+ }
+
+ while (w)
+ {
+ uint8_t m = *mask++;
+
+ if (m)
+ {
+ d = *dst;
+ mmx_mask = expand_pixel_8_1x64 (m);
+ mmx_dest = unpack_32_1x64 (d);
+
+ *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
+ &mmx_alpha,
+ &mmx_mask,
+ &mmx_dest));
+ }
+
+ w--;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ----------------------------------------------------------------
+ * pixman_fill_sse2
+ */
+
+pixman_bool_t
+pixman_fill_sse2 (uint32_t *bits,
+ int stride,
+ int bpp,
+ int x,
+ int y,
+ int width,
+ int height,
+ uint32_t data)
+{
+ uint32_t byte_width;
+ uint8_t *byte_line;
+
+ __m128i xmm_def;
+
+ if (bpp == 8)
+ {
+ uint8_t b;
+ uint16_t w;
+
+ stride = stride * (int) sizeof (uint32_t) / 1;
+ byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
+ byte_width = width;
+ stride *= 1;
+
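+ /* Replicate the fill byte into all four bytes of the 32-bit value. */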
+ b = data & 0xff;
+ w = (b << 8) | b;
+ data = (w << 16) | w;
+ }
+ else if (bpp == 16)
+ {
+ stride = stride * (int) sizeof (uint32_t) / 2;
+ byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
+ byte_width = 2 * width;
+ stride *= 2;
+
+ data = (data & 0xffff) * 0x00010001;
+ }
+ else if (bpp == 32)
+ {
+ stride = stride * (int) sizeof (uint32_t) / 4;
+ byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
+ byte_width = 4 * width;
+ stride *= 4;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ xmm_def = create_mask_2x32_128 (data, data);
+
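+ /* Each line is filled with small scalar stores until d is 16-byte
+ * aligned, then with progressively smaller runs of aligned 128-bit
+ * stores (128, 64, 32, 16 bytes at a time), and finally with scalar
+ * stores for whatever remains. */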
+ while (height--)
+ {
+ int w;
+ uint8_t *d = byte_line;
+ byte_line += stride;
+ w = byte_width;
+
+ while (w >= 1 && ((unsigned long)d & 1))
+ {
+ *(uint8_t *)d = data;
+ w -= 1;
+ d += 1;
+ }
+
+ while (w >= 2 && ((unsigned long)d & 3))
+ {
+ *(uint16_t *)d = data;
+ w -= 2;
+ d += 2;
+ }
+
+ while (w >= 4 && ((unsigned long)d & 15))
+ {
+ *(uint32_t *)d = data;
+
+ w -= 4;
+ d += 4;
+ }
+
+ while (w >= 128)
+ {
+ save_128_aligned ((__m128i*)(d), xmm_def);
+ save_128_aligned ((__m128i*)(d + 16), xmm_def);
+ save_128_aligned ((__m128i*)(d + 32), xmm_def);
+ save_128_aligned ((__m128i*)(d + 48), xmm_def);
+ save_128_aligned ((__m128i*)(d + 64), xmm_def);
+ save_128_aligned ((__m128i*)(d + 80), xmm_def);
+ save_128_aligned ((__m128i*)(d + 96), xmm_def);
+ save_128_aligned ((__m128i*)(d + 112), xmm_def);
+
+ d += 128;
+ w -= 128;
+ }
+
+ if (w >= 64)
+ {
+ save_128_aligned ((__m128i*)(d), xmm_def);
+ save_128_aligned ((__m128i*)(d + 16), xmm_def);
+ save_128_aligned ((__m128i*)(d + 32), xmm_def);
+ save_128_aligned ((__m128i*)(d + 48), xmm_def);
+
+ d += 64;
+ w -= 64;
+ }
+
+ if (w >= 32)
+ {
+ save_128_aligned ((__m128i*)(d), xmm_def);
+ save_128_aligned ((__m128i*)(d + 16), xmm_def);
+
+ d += 32;
+ w -= 32;
+ }
+
+ if (w >= 16)
+ {
+ save_128_aligned ((__m128i*)(d), xmm_def);
+
+ d += 16;
+ w -= 16;
+ }
+
+ while (w >= 4)
+ {
+ *(uint32_t *)d = data;
+
+ w -= 4;
+ d += 4;
+ }
+
+ if (w >= 2)
+ {
+ *(uint16_t *)d = data;
+ w -= 2;
+ d += 2;
+ }
+
+ if (w >= 1)
+ {
+ *(uint8_t *)d = data;
+ w -= 1;
+ d += 1;
+ }
+ }
+
+ _mm_empty ();
+ return TRUE;
+}
+
+static void
+sse2_composite_src_n_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint32_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ int32_t w;
+ uint32_t m;
+
+ __m128i xmm_src, xmm_def;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ {
+ pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride,
+ PIXMAN_FORMAT_BPP (dst_image->bits.format),
+ dest_x, dest_y, width, height, 0);
+ return;
+ }
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ xmm_def = create_mask_2x32_128 (src, src);
+ xmm_src = expand_pixel_32_1x128 (src);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint8_t m = *mask++;
+
+ if (m)
+ {
+ *dst = pack_1x64_32 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
+ }
+ else
+ {
+ *dst = 0;
+ }
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 4)
+ {
+ m = *((uint32_t*)mask);
+
+ if (srca == 0xff && m == 0xffffffff)
+ {
+ save_128_aligned ((__m128i*)dst, xmm_def);
+ }
+ else if (m)
+ {
+ xmm_mask = unpack_32_1x128 (m);
+ xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
+
+ /* Unpacking */
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_multiply_2x128 (&xmm_src, &xmm_src,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
+ }
+ else
+ {
+ save_128_aligned ((__m128i*)dst, _mm_setzero_si128 ());
+ }
+
+ w -= 4;
+ dst += 4;
+ mask += 4;
+ }
+
+ while (w)
+ {
+ uint8_t m = *mask++;
+
+ if (m)
+ {
+ *dst = pack_1x64_32 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
+ }
+ else
+ {
+ *dst = 0;
+ }
+
+ w--;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/*-----------------------------------------------------------------------
+ * composite_over_n_8_0565
+ */
+
+static void
+sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src, srca;
+ uint16_t *dst_line, *dst, d;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ int32_t w;
+ uint32_t m;
+ __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
+
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+ __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ srca = src >> 24;
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ xmm_src = expand_pixel_32_1x128 (src);
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+ mmx_src = _mm_movepi64_pi64 (xmm_src);
+ mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ m = *mask++;
+
+ if (m)
+ {
+ d = *dst;
+ mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+ mmx_dest = expand565_16_1x64 (d);
+
+ *dst = pack_565_32_16 (
+ pack_1x64_32 (
+ in_over_1x64 (
+ &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
+ }
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 8)
+ {
+ xmm_dst = load_128_aligned ((__m128i*) dst);
+ unpack_565_128_4x128 (xmm_dst,
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+
+ m = *((uint32_t*)mask);
+ mask += 4;
+
+ if (m)
+ {
+ xmm_mask = unpack_32_1x128 (m);
+ xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
+
+ /* Unpacking */
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst0, &xmm_dst1);
+ }
+
+ m = *((uint32_t*)mask);
+ mask += 4;
+
+ if (m)
+ {
+ xmm_mask = unpack_32_1x128 (m);
+ xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
+
+ /* Unpacking */
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+ in_over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst2, &xmm_dst3);
+ }
+
+ save_128_aligned (
+ (__m128i*)dst, pack_565_4x128_128 (
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
+
+ w -= 8;
+ dst += 8;
+ }
+
+ while (w)
+ {
+ m = *mask++;
+
+ if (m)
+ {
+ d = *dst;
+ mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+ mmx_dest = expand565_16_1x64 (d);
+
+ *dst = pack_565_32_16 (
+ pack_1x64_32 (
+ in_over_1x64 (
+ &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
+ }
+
+ w--;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -----------------------------------------------------------------------
+ * composite_over_pixbuf_0565
+ */
+
+static void
+sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint16_t *dst_line, *dst, d;
+ uint32_t *src_line, *src, s;
+ int dst_stride, src_stride;
+ int32_t w;
+ uint32_t opaque, zero;
+
+ __m64 ms;
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+#if 0
+ /* FIXME
+ *
+ * This code was copied from the MMX version along with its FIXME.
+ * If it is a problem there, it is probably a problem here too.
+ */
+ assert (src_image->drawable == mask_image->drawable);
+#endif
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ s = *src++;
+ d = *dst;
+
+ ms = unpack_32_1x64 (s);
+
+ *dst++ = pack_565_32_16 (
+ pack_1x64_32 (
+ over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
+ w--;
+ }
+
+ while (w >= 8)
+ {
+ /* First round */
+ xmm_src = load_128_unaligned ((__m128i*)src);
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ opaque = is_opaque (xmm_src);
+ zero = is_zero (xmm_src);
+
+ unpack_565_128_4x128 (xmm_dst,
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+
+ /* preload next round */
+ xmm_src = load_128_unaligned ((__m128i*)(src + 4));
+
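+ /* If all four source pixels are opaque only the color-channel
+ * conversion is needed; if they are all zero the destination is left
+ * untouched; otherwise take the full non-premultiplied OVER path. */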
+ if (opaque)
+ {
+ invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_dst0, &xmm_dst1);
+ }
+ else if (!zero)
+ {
+ over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_dst0, &xmm_dst1);
+ }
+
+ /* Second round */
+ opaque = is_opaque (xmm_src);
+ zero = is_zero (xmm_src);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+
+ if (opaque)
+ {
+ invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_dst2, &xmm_dst3);
+ }
+ else if (!zero)
+ {
+ over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_dst2, &xmm_dst3);
+ }
+
+ save_128_aligned (
+ (__m128i*)dst, pack_565_4x128_128 (
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
+
+ w -= 8;
+ src += 8;
+ dst += 8;
+ }
+
+ while (w)
+ {
+ s = *src++;
+ d = *dst;
+
+ ms = unpack_32_1x64 (s);
+
+ *dst++ = pack_565_32_16 (
+ pack_1x64_32 (
+ over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -------------------------------------------------------------------------
+ * composite_over_pixbuf_8888
+ */
+
+static void
+sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst, d;
+ uint32_t *src_line, *src, s;
+ int dst_stride, src_stride;
+ int32_t w;
+ uint32_t opaque, zero;
+
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst_lo, xmm_dst_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+#if 0
+ /* FIXME
+ *
+ * This code was copied from the MMX version along with its FIXME.
+ * If it is a problem there, it is probably a problem here too.
+ */
+ assert (src_image->drawable == mask_image->drawable);
+#endif
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ s = *src++;
+ d = *dst;
+
+ *dst++ = pack_1x64_32 (
+ over_rev_non_pre_1x64 (
+ unpack_32_1x64 (s), unpack_32_1x64 (d)));
+
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_src_hi = load_128_unaligned ((__m128i*)src);
+
+ opaque = is_opaque (xmm_src_hi);
+ zero = is_zero (xmm_src_hi);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+
+ if (opaque)
+ {
+ invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+ else if (!zero)
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ w -= 4;
+ dst += 4;
+ src += 4;
+ }
+
+ while (w)
+ {
+ s = *src++;
+ d = *dst;
+
+ *dst++ = pack_1x64_32 (
+ over_rev_non_pre_1x64 (
+ unpack_32_1x64 (s), unpack_32_1x64 (d)));
+
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -------------------------------------------------------------------------------------------------
+ * composite_over_n_8888_0565_ca
+ */
+
+static void
+sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint16_t *dst_line, *dst, d;
+ uint32_t *mask_line, *mask, m;
+ int dst_stride, mask_stride;
+ int w;
+ uint32_t pack_cmp;
+
+ __m128i xmm_src, xmm_alpha;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+ __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
+
+ __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+
+ xmm_src = expand_pixel_32_1x128 (src);
+ xmm_alpha = expand_alpha_1x128 (xmm_src);
+ mmx_src = _mm_movepi64_pi64 (xmm_src);
+ mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
+
+ while (height--)
+ {
+ w = width;
+ mask = mask_line;
+ dst = dst_line;
+ mask_line += mask_stride;
+ dst_line += dst_stride;
+
+ while (w && ((unsigned long)dst & 15))
+ {
+ m = *(uint32_t *) mask;
+
+ if (m)
+ {
+ d = *dst;
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = expand565_16_1x64 (d);
+
+ *dst = pack_565_32_16 (
+ pack_1x64_32 (
+ in_over_1x64 (
+ &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
+ }
+
+ w--;
+ dst++;
+ mask++;
+ }
+
+ while (w >= 8)
+ {
+ /* First round */
+ xmm_mask = load_128_unaligned ((__m128i*)mask);
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ pack_cmp = _mm_movemask_epi8 (
+ _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
+
+ unpack_565_128_4x128 (xmm_dst,
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ /* preload next round */
+ xmm_mask = load_128_unaligned ((__m128i*)(mask + 4));
+
+ /* blend the first four pixels unless their mask is entirely zero */
+ if (pack_cmp != 0xffff)
+ {
+ in_over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst0, &xmm_dst1);
+ }
+
+ /* Second round */
+ pack_cmp = _mm_movemask_epi8 (
+ _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
+
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+
+ if (pack_cmp != 0xffff)
+ {
+ in_over_2x128 (&xmm_src, &xmm_src,
+ &xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst2, &xmm_dst3);
+ }
+
+ save_128_aligned (
+ (__m128i*)dst, pack_565_4x128_128 (
+ &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
+
+ w -= 8;
+ dst += 8;
+ mask += 8;
+ }
+
+ while (w)
+ {
+ m = *(uint32_t *) mask;
+
+ if (m)
+ {
+ d = *dst;
+ mmx_mask = unpack_32_1x64 (m);
+ mmx_dest = expand565_16_1x64 (d);
+
+ *dst = pack_565_32_16 (
+ pack_1x64_32 (
+ in_over_1x64 (
+ &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
+ }
+
+ w--;
+ dst++;
+ mask++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -----------------------------------------------------------------------
+ * composite_in_n_8_8
+ */
+
+static void
+sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ uint32_t d, m;
+ uint32_t src;
+ uint8_t sa;
+ int32_t w;
+
+ __m128i xmm_alpha;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ sa = src >> 24;
+
+ xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ while (w && ((unsigned long)dst & 15))
+ {
+ m = (uint32_t) *mask++;
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ pix_multiply_1x64 (
+ pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha),
+ unpack_32_1x64 (m)),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+
+ while (w >= 16)
+ {
+ xmm_mask = load_128_unaligned ((__m128i*)mask);
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
+ &xmm_dst_lo, &xmm_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ mask += 16;
+ dst += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ m = (uint32_t) *mask++;
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ pix_multiply_1x64 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -----------------------------------------------------------------------
+ * composite_in_n_8
+ */
+
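+/* Rough per-pixel sketch of the loops below: dst = MUL (src_alpha, dst),
+ * where MUL is the rounded byte product (approximately a * b / 255).
+ * src_alpha == 0xff leaves the destination untouched and src_alpha == 0x00
+ * degenerates to filling the rectangle with zero, hence the two early
+ * returns.
+ */
+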
+static void
+sse2_composite_in_n_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ int dst_stride;
+ uint32_t d;
+ uint32_t src;
+ int32_t w;
+
+ __m128i xmm_alpha;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
+
+ src = src >> 24;
+
+ if (src == 0xff)
+ return;
+
+ if (src == 0x00)
+ {
+ pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
+ 8, dest_x, dest_y, width, height, src);
+
+ return;
+ }
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ w = width;
+
+ while (w && ((unsigned long)dst & 15))
+ {
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_alpha),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+
+ while (w >= 16)
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
+ &xmm_dst_lo, &xmm_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ dst += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_alpha),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ---------------------------------------------------------------------------
+ * composite_in_8_8
+ */
+
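+/* Rough per-pixel sketch of the loops below: dst = MUL (src, dst) for a8
+ * source and destination, MUL being the rounded byte product
+ * (approximately a * b / 255).
+ */
+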
+static void
+sse2_composite_in_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *src_line, *src;
+ int src_stride, dst_stride;
+ int32_t w;
+ uint32_t s, d;
+
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+ w = width;
+
+ while (w && ((unsigned long)dst & 15))
+ {
+ s = (uint32_t) *src++;
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ pix_multiply_1x64 (
+ unpack_32_1x64 (s), unpack_32_1x64 (d)));
+ w--;
+ }
+
+ while (w >= 16)
+ {
+ xmm_src = load_128_unaligned ((__m128i*)src);
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_dst_lo, &xmm_dst_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ src += 16;
+ dst += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ s = (uint32_t) *src++;
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -------------------------------------------------------------------------
+ * composite_add_n_8_8
+ */
+
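+/* Rough per-pixel sketch of the loops below:
+ *
+ *     dst = CLAMP (MUL (src_alpha, mask) + dst, 0, 255)
+ *
+ * The saturating adds (_mm_adds_epu16 / _mm_adds_pu16) provide the clamp.
+ */
+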
+static void
+sse2_composite_add_n_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *mask_line, *mask;
+ int dst_stride, mask_stride;
+ int32_t w;
+ uint32_t src;
+ uint8_t sa;
+ uint32_t m, d;
+
+ __m128i xmm_alpha;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ sa = src >> 24;
+
+ xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+ w = width;
+
+ while (w && ((unsigned long)dst & 15))
+ {
+ m = (uint32_t) *mask++;
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ _mm_adds_pu16 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
+ unpack_32_1x64 (d)));
+ w--;
+ }
+
+ while (w >= 16)
+ {
+ xmm_mask = load_128_unaligned ((__m128i*)mask);
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
+ &xmm_mask_lo, &xmm_mask_hi,
+ &xmm_mask_lo, &xmm_mask_hi);
+
+ xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo);
+ xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+
+ mask += 16;
+ dst += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ m = (uint32_t) *mask++;
+ d = (uint32_t) *dst;
+
+ *dst++ = (uint8_t) pack_1x64_32 (
+ _mm_adds_pu16 (
+ pix_multiply_1x64 (
+ _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
+ unpack_32_1x64 (d)));
+
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* -------------------------------------------------------------------------
+ * composite_add_n_8
+ */
+
+static void
+sse2_composite_add_n_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ int dst_stride;
+ int32_t w;
+ uint32_t src;
+
+ __m128i xmm_src;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ src >>= 24;
+
+ if (src == 0x00)
+ return;
+
+ if (src == 0xff)
+ {
+ pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
+ 8, dest_x, dest_y, width, height, 0xff);
+
+ return;
+ }
+
+ src = (src << 24) | (src << 16) | (src << 8) | src;
+ xmm_src = _mm_set_epi32 (src, src, src, src);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ w = width;
+
+ while (w && ((unsigned long)dst & 15))
+ {
+ *dst = (uint8_t)_mm_cvtsi64_si32 (
+ _mm_adds_pu8 (
+ _mm_movepi64_pi64 (xmm_src),
+ _mm_cvtsi32_si64 (*dst)));
+
+ w--;
+ dst++;
+ }
+
+ while (w >= 16)
+ {
+ save_128_aligned (
+ (__m128i*)dst, _mm_adds_epu8 (xmm_src, load_128_aligned ((__m128i*)dst)));
+
+ dst += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ *dst = (uint8_t)_mm_cvtsi64_si32 (
+ _mm_adds_pu8 (
+ _mm_movepi64_pi64 (xmm_src),
+ _mm_cvtsi32_si64 (*dst)));
+
+ w--;
+ dst++;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ----------------------------------------------------------------------
+ * composite_add_8_8
+ */
+
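+/* Rough per-pixel sketch of this path: dst = CLAMP (src + dst, 0, 255).
+ * The unaligned head and tail saturate with the "t | (0 - (t >> 8))"
+ * trick; the aligned middle is handed to core_combine_add_u_sse2 four
+ * bytes at a time (hence the w >> 2).
+ */
+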
+static void
+sse2_composite_add_8_8 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint8_t *dst_line, *dst;
+ uint8_t *src_line, *src;
+ int dst_stride, src_stride;
+ int32_t w;
+ uint16_t t;
+
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ src = src_line;
+
+ dst_line += dst_stride;
+ src_line += src_stride;
+ w = width;
+
+ /* Small head */
+ while (w && (unsigned long)dst & 3)
+ {
+ t = (*dst) + (*src++);
+ *dst++ = t | (0 - (t >> 8));
+ w--;
+ }
+
+ core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
+
+ /* Small tail */
+ dst += w & 0xfffc;
+ src += w & 0xfffc;
+
+ w &= 3;
+
+ while (w)
+ {
+ t = (*dst) + (*src++);
+ *dst++ = t | (0 - (t >> 8));
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* ---------------------------------------------------------------------
+ * composite_add_8888_8888
+ */
+static void
+sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *dst_line, *dst;
+ uint32_t *src_line, *src;
+ int dst_stride, src_stride;
+
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+
+ while (height--)
+ {
+ dst = dst_line;
+ dst_line += dst_stride;
+ src = src_line;
+ src_line += src_stride;
+
+ core_combine_add_u_sse2 (dst, src, NULL, width);
+ }
+
+ _mm_empty ();
+}
+
+/* -------------------------------------------------------------------------------------------------
+ * sse2_composite_copy_area
+ */
+
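+/* pixman_blt_sse2 is a straight rectangle copy for 16 bpp and 32 bpp
+ * images: it first aligns the destination, then streams 64 bytes per
+ * iteration through four 128-bit loads and aligned stores, and finally
+ * mops up the remainder. Any other depth returns FALSE so the caller can
+ * fall back to a generic implementation.
+ */
+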
+static pixman_bool_t
+pixman_blt_sse2 (uint32_t *src_bits,
+ uint32_t *dst_bits,
+ int src_stride,
+ int dst_stride,
+ int src_bpp,
+ int dst_bpp,
+ int src_x,
+ int src_y,
+ int dst_x,
+ int dst_y,
+ int width,
+ int height)
+{
+ uint8_t * src_bytes;
+ uint8_t * dst_bytes;
+ int byte_width;
+
+ if (src_bpp != dst_bpp)
+ return FALSE;
+
+ if (src_bpp == 16)
+ {
+ src_stride = src_stride * (int) sizeof (uint32_t) / 2;
+ dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
+ src_bytes =(uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
+ dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
+ byte_width = 2 * width;
+ src_stride *= 2;
+ dst_stride *= 2;
+ }
+ else if (src_bpp == 32)
+ {
+ src_stride = src_stride * (int) sizeof (uint32_t) / 4;
+ dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
+ src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
+ dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
+ byte_width = 4 * width;
+ src_stride *= 4;
+ dst_stride *= 4;
+ }
+ else
+ {
+ return FALSE;
+ }
+
+ while (height--)
+ {
+ int w;
+ uint8_t *s = src_bytes;
+ uint8_t *d = dst_bytes;
+ src_bytes += src_stride;
+ dst_bytes += dst_stride;
+ w = byte_width;
+
+ while (w >= 2 && ((unsigned long)d & 3))
+ {
+ *(uint16_t *)d = *(uint16_t *)s;
+ w -= 2;
+ s += 2;
+ d += 2;
+ }
+
+ while (w >= 4 && ((unsigned long)d & 15))
+ {
+ *(uint32_t *)d = *(uint32_t *)s;
+
+ w -= 4;
+ s += 4;
+ d += 4;
+ }
+
+ while (w >= 64)
+ {
+ __m128i xmm0, xmm1, xmm2, xmm3;
+
+ xmm0 = load_128_unaligned ((__m128i*)(s));
+ xmm1 = load_128_unaligned ((__m128i*)(s + 16));
+ xmm2 = load_128_unaligned ((__m128i*)(s + 32));
+ xmm3 = load_128_unaligned ((__m128i*)(s + 48));
+
+ save_128_aligned ((__m128i*)(d), xmm0);
+ save_128_aligned ((__m128i*)(d + 16), xmm1);
+ save_128_aligned ((__m128i*)(d + 32), xmm2);
+ save_128_aligned ((__m128i*)(d + 48), xmm3);
+
+ s += 64;
+ d += 64;
+ w -= 64;
+ }
+
+ while (w >= 16)
+ {
+ save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s) );
+
+ w -= 16;
+ d += 16;
+ s += 16;
+ }
+
+ while (w >= 4)
+ {
+ *(uint32_t *)d = *(uint32_t *)s;
+
+ w -= 4;
+ s += 4;
+ d += 4;
+ }
+
+ if (w >= 2)
+ {
+ *(uint16_t *)d = *(uint16_t *)s;
+ w -= 2;
+ s += 2;
+ d += 2;
+ }
+ }
+
+ _mm_empty ();
+
+ return TRUE;
+}
+
+static void
+sse2_composite_copy_area (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ pixman_blt_sse2 (src_image->bits.bits,
+ dst_image->bits.bits,
+ src_image->bits.rowstride,
+ dst_image->bits.rowstride,
+ PIXMAN_FORMAT_BPP (src_image->bits.format),
+ PIXMAN_FORMAT_BPP (dst_image->bits.format),
+ src_x, src_y, dest_x, dest_y, width, height);
+}
+
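+/* OVER with an x8r8g8b8/x8b8g8r8 source and an a8 mask. The source is
+ * made opaque by OR-ing in 0xff000000, so per pixel this reduces,
+ * roughly, to
+ *
+ *     dst = MUL (src, m) + MUL (dst, 255 - m)
+ *
+ * i.e. a mask-weighted blend of the opaque source over the destination.
+ */
+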
+static void
+sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *src, *src_line, s;
+ uint32_t *dst, *dst_line, d;
+ uint8_t *mask, *mask_line;
+ uint32_t m;
+ int src_stride, mask_stride, dst_stride;
+ int32_t w;
+ __m64 ms;
+
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ src = src_line;
+ src_line += src_stride;
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ s = 0xff000000 | *src++;
+ m = (uint32_t) *mask++;
+ d = *dst;
+ ms = unpack_32_1x64 (s);
+
+ if (m != 0xff)
+ {
+ __m64 ma = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+ __m64 md = unpack_32_1x64 (d);
+
+ ms = in_over_1x64 (&ms, &mask_x00ff, &ma, &md);
+ }
+
+ *dst++ = pack_1x64_32 (ms);
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ m = *(uint32_t*) mask;
+ xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
+
+ if (m == 0xffffffff)
+ {
+ save_128_aligned ((__m128i*)dst, xmm_src);
+ }
+ else
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &mask_00ff, &mask_00ff, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ src += 4;
+ dst += 4;
+ mask += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ m = (uint32_t) *mask++;
+
+ if (m)
+ {
+ s = 0xff000000 | *src;
+
+ if (m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ma, md, ms;
+
+ d = *dst;
+
+ ma = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+ md = unpack_32_1x64 (d);
+ ms = unpack_32_1x64 (s);
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &mask_x00ff, &ma, &md));
+ }
+
+ }
+
+ src++;
+ dst++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
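+/* OVER with an a8r8g8b8 source and an a8 mask; per pixel, roughly,
+ *
+ *     dst = MUL (src, m) + MUL (dst, 255 - MUL (src_alpha, m))
+ *
+ * The fully opaque case (src_alpha == 0xff && m == 0xff) is
+ * short-circuited to a plain store, and four-pixel groups with an
+ * all-ones mask and an opaque source skip the blend entirely.
+ */
+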
+static void
+sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *src, *src_line, s;
+ uint32_t *dst, *dst_line, d;
+ uint8_t *mask, *mask_line;
+ uint32_t m;
+ int src_stride, mask_stride, dst_stride;
+ int32_t w;
+
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ src = src_line;
+ src_line += src_stride;
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint32_t sa;
+
+ s = *src++;
+ m = (uint32_t) *mask++;
+ d = *dst;
+
+ sa = s >> 24;
+
+ if (m)
+ {
+ if (sa == 0xff && m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ms, md, ma, msa;
+
+ ma = expand_alpha_rev_1x64 (load_32_1x64 (m));
+ ms = unpack_32_1x64 (s);
+ md = unpack_32_1x64 (d);
+
+ msa = expand_alpha_rev_1x64 (load_32_1x64 (sa));
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md));
+ }
+ }
+
+ dst++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ m = *(uint32_t *) mask;
+
+ if (m)
+ {
+ xmm_src = load_128_unaligned ((__m128i*)src);
+
+ if (m == 0xffffffff && is_opaque (xmm_src))
+ {
+ save_128_aligned ((__m128i *)dst, xmm_src);
+ }
+ else
+ {
+ xmm_dst = load_128_aligned ((__m128i *)dst);
+
+ xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi);
+ expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi,
+ &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+ }
+
+ src += 4;
+ dst += 4;
+ mask += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ uint32_t sa;
+
+ s = *src++;
+ m = (uint32_t) *mask++;
+ d = *dst;
+
+ sa = s >> 24;
+
+ if (m)
+ {
+ if (sa == 0xff && m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ms, md, ma, msa;
+
+ ma = expand_alpha_rev_1x64 (load_32_1x64 (m));
+ ms = unpack_32_1x64 (s);
+ md = unpack_32_1x64 (d);
+
+ msa = expand_alpha_rev_1x64 (load_32_1x64 (sa));
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md));
+ }
+ }
+
+ dst++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
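+/* OVER_REVERSE with a solid source: the existing destination stays on top
+ * and the solid color only shows through where the destination is not
+ * opaque, i.e. per pixel, roughly,
+ *
+ *     dst = dst + MUL (src, 255 - dst_alpha)
+ */
+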
+static void
+sse2_composite_over_reverse_n_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t src;
+ uint32_t *dst_line, *dst;
+ __m128i xmm_src;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_dsta_hi, xmm_dsta_lo;
+ int dst_stride;
+ int32_t w;
+
+ src = _pixman_image_get_solid (imp, src_image, dst_image->bits.format);
+
+ if (src == 0)
+ return;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+
+ xmm_src = expand_pixel_32_1x128 (src);
+
+ while (height--)
+ {
+ dst = dst_line;
+
+ dst_line += dst_stride;
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ __m64 vd;
+
+ vd = unpack_32_1x64 (*dst);
+
+ *dst = pack_1x64_32 (over_1x64 (vd, expand_alpha_1x64 (vd),
+ _mm_movepi64_pi64 (xmm_src)));
+ w--;
+ dst++;
+ }
+
+ while (w >= 4)
+ {
+ __m128i tmp_lo, tmp_hi;
+
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+ expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dsta_lo, &xmm_dsta_hi);
+
+ tmp_lo = xmm_src;
+ tmp_hi = xmm_src;
+
+ over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
+ &xmm_dsta_lo, &xmm_dsta_hi,
+ &tmp_lo, &tmp_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (tmp_lo, tmp_hi));
+
+ w -= 4;
+ dst += 4;
+ }
+
+ while (w)
+ {
+ __m64 vd;
+
+ vd = unpack_32_1x64 (*dst);
+
+ *dst = pack_1x64_32 (over_1x64 (vd, expand_alpha_1x64 (vd),
+ _mm_movepi64_pi64 (xmm_src)));
+ w--;
+ dst++;
+ }
+
+ }
+
+ _mm_empty ();
+}
+
+static void
+sse2_composite_over_8888_8888_8888 (pixman_implementation_t *imp,
+ pixman_op_t op,
+ pixman_image_t * src_image,
+ pixman_image_t * mask_image,
+ pixman_image_t * dst_image,
+ int32_t src_x,
+ int32_t src_y,
+ int32_t mask_x,
+ int32_t mask_y,
+ int32_t dest_x,
+ int32_t dest_y,
+ int32_t width,
+ int32_t height)
+{
+ uint32_t *src, *src_line, s;
+ uint32_t *dst, *dst_line, d;
+ uint32_t *mask, *mask_line;
+ uint32_t m;
+ int src_stride, mask_stride, dst_stride;
+ int32_t w;
+
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+
+ PIXMAN_IMAGE_GET_LINE (
+ dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+ PIXMAN_IMAGE_GET_LINE (
+ src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+ while (height--)
+ {
+ src = src_line;
+ src_line += src_stride;
+ dst = dst_line;
+ dst_line += dst_stride;
+ mask = mask_line;
+ mask_line += mask_stride;
+
+ w = width;
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint32_t sa;
+
+ s = *src++;
+ m = (*mask++) >> 24;
+ d = *dst;
+
+ sa = s >> 24;
+
+ if (m)
+ {
+ if (sa == 0xff && m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ms, md, ma, msa;
+
+ ma = expand_alpha_rev_1x64 (load_32_1x64 (m));
+ ms = unpack_32_1x64 (s);
+ md = unpack_32_1x64 (d);
+
+ msa = expand_alpha_rev_1x64 (load_32_1x64 (sa));
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md));
+ }
+ }
+
+ dst++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ xmm_mask = load_128_unaligned ((__m128i*)mask);
+
+ if (!is_transparent (xmm_mask))
+ {
+ xmm_src = load_128_unaligned ((__m128i*)src);
+
+ if (is_opaque (xmm_mask) && is_opaque (xmm_src))
+ {
+ save_128_aligned ((__m128i *)dst, xmm_src);
+ }
+ else
+ {
+ xmm_dst = load_128_aligned ((__m128i *)dst);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi);
+ expand_alpha_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi,
+ &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+ }
+
+ src += 4;
+ dst += 4;
+ mask += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ uint32_t sa;
+
+ s = *src++;
+ m = (*mask++) >> 24;
+ d = *dst;
+
+ sa = s >> 24;
+
+ if (m)
+ {
+ if (sa == 0xff && m == 0xff)
+ {
+ *dst = s;
+ }
+ else
+ {
+ __m64 ms, md, ma, msa;
+
+ ma = expand_alpha_rev_1x64 (load_32_1x64 (m));
+ ms = unpack_32_1x64 (s);
+ md = unpack_32_1x64 (d);
+
+ msa = expand_alpha_rev_1x64 (load_32_1x64 (sa));
+
+ *dst = pack_1x64_32 (in_over_1x64 (&ms, &msa, &ma, &md));
+ }
+ }
+
+ dst++;
+ w--;
+ }
+ }
+
+ _mm_empty ();
+}
+
+/* A variant of 'core_combine_over_u_sse2' with minor tweaks */
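+/* The source coordinate vx is 16.16 fixed point: each output pixel samples
+ * ps[vx >> 16] (nearest neighbour) and then advances vx by unit_x. Four
+ * samples are gathered into one 128-bit register so the usual opaque /
+ * transparent shortcuts and the two-at-a-time OVER math still apply.
+ */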
+static force_inline void
+scaled_nearest_scanline_sse2_8888_8888_OVER (uint32_t* pd,
+ const uint32_t* ps,
+ int32_t w,
+ pixman_fixed_t vx,
+ pixman_fixed_t unit_x,
+ pixman_fixed_t max_vx,
+ pixman_bool_t fully_transparent_src)
+{
+ uint32_t s, d;
+ const uint32_t* pm = NULL;
+
+ __m128i xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_src_lo, xmm_src_hi;
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+
+ if (fully_transparent_src)
+ return;
+
+ /* Align dst on a 16-byte boundary */
+ while (w && ((unsigned long)pd & 15))
+ {
+ d = *pd;
+ s = combine1 (ps + (vx >> 16), pm);
+ vx += unit_x;
+
+ *pd++ = core_combine_over_u_pixel_sse2 (s, d);
+ if (pm)
+ pm++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ __m128i tmp;
+ uint32_t tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = ps[vx >> 16];
+ vx += unit_x;
+ tmp2 = ps[vx >> 16];
+ vx += unit_x;
+ tmp3 = ps[vx >> 16];
+ vx += unit_x;
+ tmp4 = ps[vx >> 16];
+ vx += unit_x;
+
+ tmp = _mm_set_epi32 (tmp4, tmp3, tmp2, tmp1);
+
+ xmm_src_hi = combine4 ((__m128i*)&tmp, (__m128i*)pm);
+
+ if (is_opaque (xmm_src_hi))
+ {
+ save_128_aligned ((__m128i*)pd, xmm_src_hi);
+ }
+ else if (!is_zero (xmm_src_hi))
+ {
+ xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+
+ unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+
+ expand_alpha_2x128 (
+ xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
+
+ over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ /* rebuild the 4 pixel data and save */
+ save_128_aligned ((__m128i*)pd,
+ pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ w -= 4;
+ pd += 4;
+ if (pm)
+ pm += 4;
+ }
+
+ while (w)
+ {
+ d = *pd;
+ s = combine1 (ps + (vx >> 16), pm);
+ vx += unit_x;
+
+ *pd++ = core_combine_over_u_pixel_sse2 (s, d);
+ if (pm)
+ pm++;
+
+ w--;
+ }
+ _mm_empty ();
+}
+
+FAST_NEAREST_MAINLOOP (sse2_8888_8888_cover_OVER,
+ scaled_nearest_scanline_sse2_8888_8888_OVER,
+ uint32_t, uint32_t, COVER)
+FAST_NEAREST_MAINLOOP (sse2_8888_8888_none_OVER,
+ scaled_nearest_scanline_sse2_8888_8888_OVER,
+ uint32_t, uint32_t, NONE)
+FAST_NEAREST_MAINLOOP (sse2_8888_8888_pad_OVER,
+ scaled_nearest_scanline_sse2_8888_8888_OVER,
+ uint32_t, uint32_t, PAD)
+
+static force_inline void
+scaled_nearest_scanline_sse2_8888_n_8888_OVER (const uint32_t * mask,
+ uint32_t * dst,
+ const uint32_t * src,
+ int32_t w,
+ pixman_fixed_t vx,
+ pixman_fixed_t unit_x,
+ pixman_fixed_t max_vx,
+ pixman_bool_t zero_src)
+{
+ __m128i xmm_mask;
+ __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+ __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+ __m128i xmm_alpha_lo, xmm_alpha_hi;
+
+ if (zero_src || (*mask >> 24) == 0)
+ return;
+
+ xmm_mask = create_mask_16_128 (*mask >> 24);
+
+ while (w && (unsigned long)dst & 15)
+ {
+ uint32_t s = src[pixman_fixed_to_int (vx)];
+ vx += unit_x;
+
+ if (s)
+ {
+ uint32_t d = *dst;
+
+ __m64 ms = unpack_32_1x64 (s);
+ __m64 alpha = expand_alpha_1x64 (ms);
+ __m64 mask = _mm_movepi64_pi64 (xmm_mask);
+ __m64 dest = unpack_32_1x64 (d);
+
+ *dst = pack_1x64_32 (
+ in_over_1x64 (&ms, &alpha, &mask, &dest));
+ }
+ dst++;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ uint32_t tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = src[pixman_fixed_to_int (vx)];
+ vx += unit_x;
+ tmp2 = src[pixman_fixed_to_int (vx)];
+ vx += unit_x;
+ tmp3 = src[pixman_fixed_to_int (vx)];
+ vx += unit_x;
+ tmp4 = src[pixman_fixed_to_int (vx)];
+ vx += unit_x;
+
+ xmm_src = _mm_set_epi32 (tmp4, tmp3, tmp2, tmp1);
+
+ if (!is_zero (xmm_src))
+ {
+ xmm_dst = load_128_aligned ((__m128i*)dst);
+
+ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+ expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi);
+
+ in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
+ &xmm_alpha_lo, &xmm_alpha_hi,
+ &xmm_mask, &xmm_mask,
+ &xmm_dst_lo, &xmm_dst_hi);
+
+ save_128_aligned (
+ (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
+ }
+
+ dst += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ uint32_t s = src[pixman_fixed_to_int (vx)];
+ vx += unit_x;
+
+ if (s)
+ {
+ uint32_t d = *dst;
+
+ __m64 ms = unpack_32_1x64 (s);
+ __m64 alpha = expand_alpha_1x64 (ms);
+ __m64 mask = _mm_movepi64_pi64 (xmm_mask);
+ __m64 dest = unpack_32_1x64 (d);
+
+ *dst = pack_1x64_32 (
+ in_over_1x64 (&ms, &alpha, &mask, &dest));
+ }
+
+ dst++;
+ w--;
+ }
+
+ _mm_empty ();
+}
+
+FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_cover_OVER,
+ scaled_nearest_scanline_sse2_8888_n_8888_OVER,
+ uint32_t, uint32_t, uint32_t, COVER, TRUE, TRUE)
+FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_pad_OVER,
+ scaled_nearest_scanline_sse2_8888_n_8888_OVER,
+ uint32_t, uint32_t, uint32_t, PAD, TRUE, TRUE)
+FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_none_OVER,
+ scaled_nearest_scanline_sse2_8888_n_8888_OVER,
+ uint32_t, uint32_t, uint32_t, NONE, TRUE, TRUE)
+
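+/* The fast-path table: each entry is keyed on (operator, source format,
+ * mask format, destination format), where "solid" denotes a solid-color
+ * image and "null" denotes no mask. Entries are matched in order, first
+ * match wins, and the table is terminated by PIXMAN_OP_NONE.
+ */
+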
+static const pixman_fast_path_t sse2_fast_paths[] =
+{
+ /* PIXMAN_OP_OVER */
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, sse2_composite_over_n_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, sse2_composite_over_n_8_0565),
+ PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, sse2_composite_over_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, sse2_composite_over_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, sse2_composite_over_n_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, sse2_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, sse2_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, sse2_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, sse2_composite_over_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, sse2_composite_over_8888_0565),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, sse2_composite_over_8888_0565),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, sse2_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, sse2_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, sse2_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, sse2_composite_over_n_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, sse2_composite_over_8888_8888_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, sse2_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, sse2_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, sse2_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, sse2_composite_over_8888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, sse2_composite_over_x888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, sse2_composite_over_x888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, sse2_composite_over_x888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, sse2_composite_over_x888_8_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, sse2_composite_over_x888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, sse2_composite_over_x888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, a8b8g8r8, sse2_composite_over_x888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, sse2_composite_over_x888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, sse2_composite_over_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, sse2_composite_over_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, sse2_composite_over_8888_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, sse2_composite_over_8888_n_8888),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, sse2_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, sse2_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, sse2_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, sse2_composite_over_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, sse2_composite_over_n_8888_0565_ca),
+ PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, sse2_composite_over_n_8888_0565_ca),
+ PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, sse2_composite_over_pixbuf_8888),
+ PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, sse2_composite_over_pixbuf_8888),
+ PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, sse2_composite_over_pixbuf_8888),
+ PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, sse2_composite_over_pixbuf_8888),
+ PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, r5g6b5, sse2_composite_over_pixbuf_0565),
+ PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, sse2_composite_over_pixbuf_0565),
+ PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area),
+
+ /* PIXMAN_OP_OVER_REVERSE */
+ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, sse2_composite_over_reverse_n_8888),
+ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, sse2_composite_over_reverse_n_8888),
+
+ /* PIXMAN_OP_ADD */
+ PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, sse2_composite_add_n_8888_8888_ca),
+ PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, sse2_composite_add_8_8),
+ PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, sse2_composite_add_8888_8888),
+ PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, sse2_composite_add_8888_8888),
+ PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, sse2_composite_add_n_8_8),
+ PIXMAN_STD_FAST_PATH (ADD, solid, null, a8, sse2_composite_add_n_8),
+
+ /* PIXMAN_OP_SRC */
+ PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, sse2_composite_src_n_8_8888),
+ PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, sse2_composite_src_n_8_8888),
+ PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, sse2_composite_src_n_8_8888),
+ PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, sse2_composite_src_n_8_8888),
+ PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, sse2_composite_src_x888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, sse2_composite_src_x888_8888),
+ PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, sse2_composite_copy_area),
+ PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, sse2_composite_copy_area),
+
+ /* PIXMAN_OP_IN */
+ PIXMAN_STD_FAST_PATH (IN, a8, null, a8, sse2_composite_in_8_8),
+ PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, sse2_composite_in_n_8_8),
+ PIXMAN_STD_FAST_PATH (IN, solid, null, a8, sse2_composite_in_n_8),
+
+ SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_COVER (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_NONE (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888),
+ SIMPLE_NEAREST_FAST_PATH_PAD (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888),
+
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_n_8888),
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_n_8888),
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_n_8888),
+ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_n_8888),
+
+ { PIXMAN_OP_NONE },
+};
+
+static pixman_bool_t
+sse2_blt (pixman_implementation_t *imp,
+ uint32_t * src_bits,
+ uint32_t * dst_bits,
+ int src_stride,
+ int dst_stride,
+ int src_bpp,
+ int dst_bpp,
+ int src_x,
+ int src_y,
+ int dst_x,
+ int dst_y,
+ int width,
+ int height)
+{
+ if (!pixman_blt_sse2 (
+ src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+ src_x, src_y, dst_x, dst_y, width, height))
+ {
+ return _pixman_implementation_blt (
+ imp->delegate,
+ src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+ src_x, src_y, dst_x, dst_y, width, height);
+ }
+
+ return TRUE;
+}
+
+#if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
+__attribute__((__force_align_arg_pointer__))
+#endif
+static pixman_bool_t
+sse2_fill (pixman_implementation_t *imp,
+ uint32_t * bits,
+ int stride,
+ int bpp,
+ int x,
+ int y,
+ int width,
+ int height,
+ uint32_t xor)
+{
+ if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor))
+ {
+ return _pixman_implementation_fill (
+ imp->delegate, bits, stride, bpp, x, y, width, height, xor);
+ }
+
+ return TRUE;
+}
+
+static uint32_t *
+sse2_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
+{
+ int w = iter->width;
+ __m128i ff000000 = mask_ff000000;
+ uint32_t *dst = iter->buffer;
+ uint32_t *src = (uint32_t *)iter->bits;
+
+ iter->bits += iter->stride;
+
+ while (w && ((unsigned long)dst) & 0x0f)
+ {
+ *dst++ = (*src++) | 0xff000000;
+ w--;
+ }
+
+ while (w >= 4)
+ {
+ save_128_aligned (
+ (__m128i *)dst, _mm_or_si128 (
+ load_128_unaligned ((__m128i *)src), ff000000));
+
+ dst += 4;
+ src += 4;
+ w -= 4;
+ }
+
+ while (w)
+ {
+ *dst++ = (*src++) | 0xff000000;
+ w--;
+ }
+
+ return iter->buffer;
+}
+
+static uint32_t *
+sse2_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask)
+{
+ int w = iter->width;
+ uint32_t *dst = iter->buffer;
+ uint16_t *src = (uint16_t *)iter->bits;
+ __m128i ff000000 = mask_ff000000;
+
+ iter->bits += iter->stride;
+
+ while (w && ((unsigned long)dst) & 0x0f)
+ {
+ uint16_t s = *src++;
+
+ *dst++ = CONVERT_0565_TO_8888 (s);
+ w--;
+ }
+
+ while (w >= 8)
+ {
+ __m128i lo, hi, s;
+
+ s = _mm_loadu_si128 ((__m128i *)src);
+
+ lo = unpack_565_to_8888 (_mm_unpacklo_epi16 (s, _mm_setzero_si128 ()));
+ hi = unpack_565_to_8888 (_mm_unpackhi_epi16 (s, _mm_setzero_si128 ()));
+
+ save_128_aligned ((__m128i *)(dst + 0), _mm_or_si128 (lo, ff000000));
+ save_128_aligned ((__m128i *)(dst + 4), _mm_or_si128 (hi, ff000000));
+
+ dst += 8;
+ src += 8;
+ w -= 8;
+ }
+
+ while (w)
+ {
+ uint16_t s = *src++;
+
+ *dst++ = CONVERT_0565_TO_8888 (s);
+ w--;
+ }
+
+ return iter->buffer;
+}
+
+static uint32_t *
+sse2_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
+{
+ int w = iter->width;
+ uint32_t *dst = iter->buffer;
+ uint8_t *src = iter->bits;
+ __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
+
+ iter->bits += iter->stride;
+
+ while (w && (((unsigned long)dst) & 15))
+ {
+ *dst++ = *(src++) << 24;
+ w--;
+ }
+
+ while (w >= 16)
+ {
+ xmm0 = _mm_loadu_si128((__m128i *)src);
+
+ xmm1 = _mm_unpacklo_epi8 (_mm_setzero_si128(), xmm0);
+ xmm2 = _mm_unpackhi_epi8 (_mm_setzero_si128(), xmm0);
+ xmm3 = _mm_unpacklo_epi16 (_mm_setzero_si128(), xmm1);
+ xmm4 = _mm_unpackhi_epi16 (_mm_setzero_si128(), xmm1);
+ xmm5 = _mm_unpacklo_epi16 (_mm_setzero_si128(), xmm2);
+ xmm6 = _mm_unpackhi_epi16 (_mm_setzero_si128(), xmm2);
+
+ _mm_store_si128(((__m128i *)(dst + 0)), xmm3);
+ _mm_store_si128(((__m128i *)(dst + 4)), xmm4);
+ _mm_store_si128(((__m128i *)(dst + 8)), xmm5);
+ _mm_store_si128(((__m128i *)(dst + 12)), xmm6);
+
+ dst += 16;
+ src += 16;
+ w -= 16;
+ }
+
+ while (w)
+ {
+ *dst++ = *(src++) << 24;
+ w--;
+ }
+
+ return iter->buffer;
+}
+
+typedef struct
+{
+ pixman_format_code_t format;
+ pixman_iter_get_scanline_t get_scanline;
+} fetcher_info_t;
+
+static const fetcher_info_t fetchers[] =
+{
+ { PIXMAN_x8r8g8b8, sse2_fetch_x8r8g8b8 },
+ { PIXMAN_r5g6b5, sse2_fetch_r5g6b5 },
+ { PIXMAN_a8, sse2_fetch_a8 },
+ { PIXMAN_null }
+};
+
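+/* Source-iterator hook: when a narrow (8 bits per channel) iterator is
+ * requested for an untransformed bits image that lies entirely within its
+ * bounds, one of the fetchers above converts scanlines straight to
+ * a8r8g8b8; anything else is delegated to the fallback implementation.
+ */
+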
+static void
+sse2_src_iter_init (pixman_implementation_t *imp,
+ pixman_iter_t *iter,
+ pixman_image_t *image,
+ int x, int y, int width, int height,
+ uint8_t *buffer, iter_flags_t flags)
+{
+#define FLAGS \
+ (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM)
+
+ if ((flags & ITER_NARROW) &&
+ (image->common.flags & FLAGS) == FLAGS &&
+ x >= 0 && y >= 0 &&
+ x + width <= image->bits.width &&
+ y + height <= image->bits.height)
+ {
+ const fetcher_info_t *f;
+
+ for (f = &fetchers[0]; f->format != PIXMAN_null; f++)
+ {
+ if (image->common.extended_format_code == f->format)
+ {
+ uint8_t *b = (uint8_t *)image->bits.bits;
+ int s = image->bits.rowstride * 4;
+
+ iter->bits = b + s * y + x * PIXMAN_FORMAT_BPP (f->format) / 8;
+ iter->stride = s;
+ iter->width = width;
+ iter->buffer = (uint32_t *)buffer;
+
+ iter->get_scanline = f->get_scanline;
+ return;
+ }
+ }
+ }
+
+ _pixman_implementation_src_iter_init (
+ imp->delegate, iter, image, x, y, width, height, buffer, flags);
+}
+
+#if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
+__attribute__((__force_align_arg_pointer__))
+#endif
+pixman_implementation_t *
+_pixman_implementation_create_sse2 (pixman_implementation_t *fallback)
+{
+ pixman_implementation_t *imp = _pixman_implementation_create (fallback, sse2_fast_paths);
+
+ /* SSE2 constants */
+ mask_565_r = create_mask_2x32_128 (0x00f80000, 0x00f80000);
+ mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000);
+ mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0);
+ mask_565_b = create_mask_2x32_128 (0x0000001f, 0x0000001f);
+ mask_red = create_mask_2x32_128 (0x00f80000, 0x00f80000);
+ mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00);
+ mask_blue = create_mask_2x32_128 (0x000000f8, 0x000000f8);
+ mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0);
+ mask_565_fix_g = create_mask_2x32_128 (0x0000c000, 0x0000c000);
+ mask_0080 = create_mask_16_128 (0x0080);
+ mask_00ff = create_mask_16_128 (0x00ff);
+ mask_0101 = create_mask_16_128 (0x0101);
+ mask_ffff = create_mask_16_128 (0xffff);
+ mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
+ mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
+
+ /* MMX constants */
+ mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f);
+ mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840);
+
+ mask_x0080 = create_mask_16_64 (0x0080);
+ mask_x00ff = create_mask_16_64 (0x00ff);
+ mask_x0101 = create_mask_16_64 (0x0101);
+ mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000);
+
+ _mm_empty ();
+
+ /* Set up function pointers */
+
+ /* SSE code patch for fbcompose.c */
+ imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u;
+ imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u;
+ imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u;
+ imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u;
+ imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u;
+ imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u;
+ imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u;
+ imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u;
+ imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u;
+ imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u;
+
+ imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u;
+
+ imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca;
+ imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca;
+ imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca;
+ imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca;
+ imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca;
+ imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca;
+ imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca;
+ imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca;
+
+ imp->blt = sse2_blt;
+ imp->fill = sse2_fill;
+
+ imp->src_iter_init = sse2_src_iter_init;
+
+ return imp;
+}
+
+#endif /* USE_SSE2 */
diff --git a/pixman/test/Makefile.am b/pixman/test/Makefile.am
index 8d8471d1c..05cb83d06 100644
--- a/pixman/test/Makefile.am
+++ b/pixman/test/Makefile.am
@@ -1,142 +1,44 @@
-AM_CFLAGS = @OPENMP_CFLAGS@
-AM_LDFLAGS = @OPENMP_CFLAGS@
-
-TEST_LDADD = $(top_builddir)/pixman/libpixman-1.la -lm
-INCLUDES = -I$(top_srcdir)/pixman -I$(top_builddir)/pixman
-
-TESTPROGRAMS = \
- a1-trap-test \
- pdf-op-test \
- region-test \
- region-translate-test \
- fetch-test \
- oob-test \
- trap-crasher \
- alpha-loop \
- scaling-crash-test \
- gradient-crash-test \
- alphamap \
- stress-test \
- blitters-test \
- scaling-test \
- affine-test \
- composite
-
-a1_trap_test_LDADD = $(TEST_LDADD)
-a1_trap_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-
-fetch_test_LDADD = $(TEST_LDADD)
-fetch_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-
-trap_crasher_LDADD = $(TEST_LDADD)
-trap_crasher_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-
-oob_test_LDADD = $(TEST_LDADD)
-oob_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-
-scaling_crash_test_LDADD = $(TEST_LDADD)
-scaling_crash_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-
-region_translate_test_LDADD = $(TEST_LDADD)
-region_translate_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-
-pdf_op_test_LDADD = $(TEST_LDADD)
-pdf_op_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-pdf_op_test_SOURCES = pdf-op-test.c utils.c utils.h
-
-region_test_LDADD = $(TEST_LDADD)
-region_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-region_test_SOURCES = region-test.c utils.c utils.h
-
-blitters_test_LDADD = $(TEST_LDADD)
-blitters_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-blitters_test_SOURCES = blitters-test.c utils.c utils.h
-
-scaling_test_LDADD = $(TEST_LDADD)
-scaling_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-scaling_test_SOURCES = scaling-test.c utils.c utils.h
-
-affine_test_LDADD = $(TEST_LDADD)
-affine_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-affine_test_SOURCES = affine-test.c utils.c utils.h
-
-alphamap_LDADD = $(TEST_LDADD)
-alphamap_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-alphamap_SOURCES = alphamap.c utils.c utils.h
-
-alpha_loop_LDADD = $(TEST_LDADD)
-alpha_loop_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-alpha_loop_SOURCES = alpha-loop.c utils.c utils.h
-
-composite_LDADD = $(TEST_LDADD)
-composite_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-composite_SOURCES = composite.c utils.c utils.h
-
-gradient_crash_test_LDADD = $(TEST_LDADD)
-gradient_crash_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-gradient_crash_test_SOURCES = gradient-crash-test.c utils.c utils.h
-
-stress_test_LDADD = $(TEST_LDADD)
-stress_test_LDFLAGS = @TESTPROGS_EXTRA_LDFLAGS@
-stress_test_SOURCES = stress-test.c utils.c utils.h
-
-# GTK using test programs
-
-if HAVE_GTK
-
-GTK_LDADD = $(TEST_LDADD) $(GTK_LIBS)
-GTK_UTILS = gtk-utils.c gtk-utils.h
-
-TESTPROGRAMS_GTK = \
- clip-test \
- clip-in \
- composite-test \
- gradient-test \
- radial-test \
- alpha-test \
- screen-test \
- convolution-test \
- trap-test
-
-INCLUDES += $(GTK_CFLAGS)
-
-gradient_test_LDADD = $(GTK_LDADD)
-gradient_test_SOURCES = gradient-test.c $(GTK_UTILS)
-
-radial_test_LDADD = $(GTK_LDADD)
-radial_test_SOURCES = radial-test.c utils.c utils.h $(GTK_UTILS)
-
-alpha_test_LDADD = $(GTK_LDADD)
-alpha_test_SOURCES = alpha-test.c $(GTK_UTILS)
-
-composite_test_LDADD = $(GTK_LDADD)
-composite_test_SOURCES = composite-test.c $(GTK_UTILS)
-
-clip_test_LDADD = $(GTK_LDADD)
-clip_test_SOURCES = clip-test.c $(GTK_UTILS)
-
-clip_in_LDADD = $(GTK_LDADD)
-clip_in_SOURCES = clip-in.c $(GTK_UTILS)
-
-trap_test_LDADD = $(GTK_LDADD)
-trap_test_SOURCES = trap-test.c $(GTK_UTILS)
-
-screen_test_LDADD = $(GTK_LDADD)
-screen_test_SOURCES = screen-test.c $(GTK_UTILS)
-
-convolution_test_LDADD = $(GTK_LDADD)
-convolution_test_SOURCES = convolution-test.c $(GTK_UTILS)
-
-endif
-
-# Benchmarks
-
-BENCHMARKS = \
- lowlevel-blt-bench
-
-lowlevel_blt_bench_SOURCES = lowlevel-blt-bench.c utils.c utils.h
-lowlevel_blt_bench_LDADD = $(TEST_LDADD)
-
-noinst_PROGRAMS = $(TESTPROGRAMS) $(TESTPROGRAMS_GTK) $(BENCHMARKS)
-
-TESTS = $(TESTPROGRAMS)
+AM_CFLAGS = @OPENMP_CFLAGS@
+AM_LDFLAGS = @OPENMP_CFLAGS@ @TESTPROGS_EXTRA_LDFLAGS@
+LDADD = $(top_builddir)/pixman/libpixman-1.la -lm
+INCLUDES = -I$(top_srcdir)/pixman -I$(top_builddir)/pixman
+
+TESTPROGRAMS = \
+ a1-trap-test \
+ pdf-op-test \
+ region-test \
+ region-translate-test \
+ fetch-test \
+ oob-test \
+ trap-crasher \
+ alpha-loop \
+ scaling-crash-test \
+ gradient-crash-test \
+ alphamap \
+ stress-test \
+ blitters-test \
+ scaling-test \
+ affine-test \
+ composite
+
+pdf_op_test_SOURCES = pdf-op-test.c utils.c utils.h
+region_test_SOURCES = region-test.c utils.c utils.h
+blitters_test_SOURCES = blitters-test.c utils.c utils.h
+scaling_test_SOURCES = scaling-test.c utils.c utils.h
+affine_test_SOURCES = affine-test.c utils.c utils.h
+alphamap_SOURCES = alphamap.c utils.c utils.h
+alpha_loop_SOURCES = alpha-loop.c utils.c utils.h
+composite_SOURCES = composite.c utils.c utils.h
+gradient_crash_test_SOURCES = gradient-crash-test.c utils.c utils.h
+stress_test_SOURCES = stress-test.c utils.c utils.h
+
+# Benchmarks
+
+BENCHMARKS = \
+ lowlevel-blt-bench
+
+lowlevel_blt_bench_SOURCES = lowlevel-blt-bench.c utils.c utils.h
+
+noinst_PROGRAMS = $(TESTPROGRAMS) $(BENCHMARKS)
+
+TESTS = $(TESTPROGRAMS)
diff --git a/pixman/test/scaling-test.c b/pixman/test/scaling-test.c
index 7b78017a3..2a0826a46 100644
--- a/pixman/test/scaling-test.c
+++ b/pixman/test/scaling-test.c
@@ -12,10 +12,10 @@
#include <stdio.h>
#include "utils.h"
-#define MAX_SRC_WIDTH 16
-#define MAX_SRC_HEIGHT 16
-#define MAX_DST_WIDTH 16
-#define MAX_DST_HEIGHT 16
+#define MAX_SRC_WIDTH 48
+#define MAX_SRC_HEIGHT 8
+#define MAX_DST_WIDTH 48
+#define MAX_DST_HEIGHT 8
#define MAX_STRIDE 4
/*
@@ -27,24 +27,32 @@ test_composite (int testnum,
{
int i;
pixman_image_t * src_img;
+ pixman_image_t * mask_img;
pixman_image_t * dst_img;
pixman_transform_t transform;
pixman_region16_t clip;
int src_width, src_height;
+ int mask_width, mask_height;
int dst_width, dst_height;
- int src_stride, dst_stride;
+ int src_stride, mask_stride, dst_stride;
int src_x, src_y;
+ int mask_x, mask_y;
int dst_x, dst_y;
int src_bpp;
+ int mask_bpp = 1;
int dst_bpp;
int w, h;
pixman_fixed_t scale_x = 65536, scale_y = 65536;
pixman_fixed_t translate_x = 0, translate_y = 0;
+ pixman_fixed_t mask_scale_x = 65536, mask_scale_y = 65536;
+ pixman_fixed_t mask_translate_x = 0, mask_translate_y = 0;
pixman_op_t op;
pixman_repeat_t repeat = PIXMAN_REPEAT_NONE;
+ pixman_repeat_t mask_repeat = PIXMAN_REPEAT_NONE;
pixman_format_code_t src_fmt, dst_fmt;
uint32_t * srcbuf;
uint32_t * dstbuf;
+ uint32_t * maskbuf;
uint32_t crc32;
FLOAT_REGS_CORRUPTION_DETECTOR_START ();
@@ -52,34 +60,68 @@ test_composite (int testnum,
src_bpp = (lcg_rand_n (2) == 0) ? 2 : 4;
dst_bpp = (lcg_rand_n (2) == 0) ? 2 : 4;
- op = (lcg_rand_n (2) == 0) ? PIXMAN_OP_SRC : PIXMAN_OP_OVER;
+ switch (lcg_rand_n (3))
+ {
+ case 0:
+ op = PIXMAN_OP_SRC;
+ break;
+ case 1:
+ op = PIXMAN_OP_OVER;
+ break;
+ default:
+ op = PIXMAN_OP_ADD;
+ break;
+ }
src_width = lcg_rand_n (MAX_SRC_WIDTH) + 1;
src_height = lcg_rand_n (MAX_SRC_HEIGHT) + 1;
+
+ if (lcg_rand_n (2))
+ {
+ mask_width = lcg_rand_n (MAX_SRC_WIDTH) + 1;
+ mask_height = lcg_rand_n (MAX_SRC_HEIGHT) + 1;
+ }
+ else
+ {
+ mask_width = mask_height = 1;
+ }
+
dst_width = lcg_rand_n (MAX_DST_WIDTH) + 1;
dst_height = lcg_rand_n (MAX_DST_HEIGHT) + 1;
src_stride = src_width * src_bpp + lcg_rand_n (MAX_STRIDE) * src_bpp;
+ mask_stride = mask_width * mask_bpp + lcg_rand_n (MAX_STRIDE) * mask_bpp;
dst_stride = dst_width * dst_bpp + lcg_rand_n (MAX_STRIDE) * dst_bpp;
if (src_stride & 3)
src_stride += 2;
+ if (mask_stride & 1)
+ mask_stride += 1;
+ if (mask_stride & 2)
+ mask_stride += 2;
+
if (dst_stride & 3)
dst_stride += 2;
src_x = -(src_width / 4) + lcg_rand_n (src_width * 3 / 2);
src_y = -(src_height / 4) + lcg_rand_n (src_height * 3 / 2);
+ mask_x = -(mask_width / 4) + lcg_rand_n (mask_width * 3 / 2);
+ mask_y = -(mask_height / 4) + lcg_rand_n (mask_height * 3 / 2);
dst_x = -(dst_width / 4) + lcg_rand_n (dst_width * 3 / 2);
dst_y = -(dst_height / 4) + lcg_rand_n (dst_height * 3 / 2);
w = lcg_rand_n (dst_width * 3 / 2 - dst_x);
h = lcg_rand_n (dst_height * 3 / 2 - dst_y);
srcbuf = (uint32_t *)malloc (src_stride * src_height);
+ maskbuf = (uint32_t *)malloc (mask_stride * mask_height);
dstbuf = (uint32_t *)malloc (dst_stride * dst_height);
for (i = 0; i < src_stride * src_height; i++)
*((uint8_t *)srcbuf + i) = lcg_rand_n (256);
+ for (i = 0; i < mask_stride * mask_height; i++)
+ *((uint8_t *)maskbuf + i) = lcg_rand_n (256);
+
for (i = 0; i < dst_stride * dst_height; i++)
*((uint8_t *)dstbuf + i) = lcg_rand_n (256);
@@ -92,13 +134,16 @@ test_composite (int testnum,
src_img = pixman_image_create_bits (
src_fmt, src_width, src_height, srcbuf, src_stride);
+ mask_img = pixman_image_create_bits (
+ PIXMAN_a8, mask_width, mask_height, maskbuf, mask_stride);
+
dst_img = pixman_image_create_bits (
dst_fmt, dst_width, dst_height, dstbuf, dst_stride);
image_endian_swap (src_img, src_bpp * 8);
image_endian_swap (dst_img, dst_bpp * 8);
- if (lcg_rand_n (8) > 0)
+ if (lcg_rand_n (4) > 0)
{
scale_x = -32768 * 3 + lcg_rand_N (65536 * 5);
scale_y = -32768 * 3 + lcg_rand_N (65536 * 5);
@@ -109,6 +154,40 @@ test_composite (int testnum,
pixman_image_set_transform (src_img, &transform);
}
+ if (lcg_rand_n (2) > 0)
+ {
+ mask_scale_x = -32768 * 3 + lcg_rand_N (65536 * 5);
+ mask_scale_y = -32768 * 3 + lcg_rand_N (65536 * 5);
+ mask_translate_x = lcg_rand_N (65536);
+ mask_translate_y = lcg_rand_N (65536);
+ pixman_transform_init_scale (&transform, mask_scale_x, mask_scale_y);
+ pixman_transform_translate (&transform, NULL, mask_translate_x, mask_translate_y);
+ pixman_image_set_transform (mask_img, &transform);
+ }
+
+ switch (lcg_rand_n (4))
+ {
+ case 0:
+ mask_repeat = PIXMAN_REPEAT_NONE;
+ break;
+
+ case 1:
+ mask_repeat = PIXMAN_REPEAT_NORMAL;
+ break;
+
+ case 2:
+ mask_repeat = PIXMAN_REPEAT_PAD;
+ break;
+
+ case 3:
+ mask_repeat = PIXMAN_REPEAT_REFLECT;
+ break;
+
+ default:
+ break;
+ }
+ pixman_image_set_repeat (mask_img, mask_repeat);
+
switch (lcg_rand_n (4))
{
case 0:
@@ -137,6 +216,11 @@ test_composite (int testnum,
else
pixman_image_set_filter (src_img, PIXMAN_FILTER_BILINEAR, NULL, 0);
+ if (lcg_rand_n (2))
+ pixman_image_set_filter (mask_img, PIXMAN_FILTER_NEAREST, NULL, 0);
+ else
+ pixman_image_set_filter (mask_img, PIXMAN_FILTER_BILINEAR, NULL, 0);
+
if (verbose)
{
printf ("src_fmt=%08X, dst_fmt=%08X\n", src_fmt, dst_fmt);
@@ -183,6 +267,34 @@ test_composite (int testnum,
{
pixman_box16_t clip_boxes[2];
int n = lcg_rand_n (2) + 1;
+
+ for (i = 0; i < n; i++)
+ {
+ clip_boxes[i].x1 = lcg_rand_n (mask_width);
+ clip_boxes[i].y1 = lcg_rand_n (mask_height);
+ clip_boxes[i].x2 =
+ clip_boxes[i].x1 + lcg_rand_n (mask_width - clip_boxes[i].x1);
+ clip_boxes[i].y2 =
+ clip_boxes[i].y1 + lcg_rand_n (mask_height - clip_boxes[i].y1);
+
+ if (verbose)
+ {
+ printf ("mask clip box: [%d,%d-%d,%d]\n",
+ clip_boxes[i].x1, clip_boxes[i].y1,
+ clip_boxes[i].x2, clip_boxes[i].y2);
+ }
+ }
+
+ pixman_region_init_rects (&clip, clip_boxes, n);
+ pixman_image_set_clip_region (mask_img, &clip);
+ pixman_image_set_source_clipping (mask_img, 1);
+ pixman_region_fini (&clip);
+ }
+
+ if (lcg_rand_n (8) == 0)
+ {
+ pixman_box16_t clip_boxes[2];
+ int n = lcg_rand_n (2) + 1;
for (i = 0; i < n; i++)
{
clip_boxes[i].x1 = lcg_rand_n (dst_width);
@@ -204,8 +316,12 @@ test_composite (int testnum,
pixman_region_fini (&clip);
}
- pixman_image_composite (op, src_img, NULL, dst_img,
+ if (lcg_rand_n (2) == 0)
+ pixman_image_composite (op, src_img, NULL, dst_img,
src_x, src_y, 0, 0, dst_x, dst_y, w, h);
+ else
+ pixman_image_composite (op, src_img, mask_img, dst_img,
+ src_x, src_y, mask_x, mask_y, dst_x, dst_y, w, h);
if (dst_fmt == PIXMAN_x8r8g8b8)
{
@@ -230,10 +346,12 @@ test_composite (int testnum,
}
pixman_image_unref (src_img);
+ pixman_image_unref (mask_img);
pixman_image_unref (dst_img);
crc32 = compute_crc32 (0, dstbuf, dst_stride * dst_height);
free (srcbuf);
+ free (maskbuf);
free (dstbuf);
FLOAT_REGS_CORRUPTION_DETECTOR_FINISH ();
@@ -245,6 +363,6 @@ main (int argc, const char *argv[])
{
pixman_disable_out_of_bounds_workaround ();
- return fuzzer_test_main("scaling", 8000000, 0x7F1AB59F,
+ return fuzzer_test_main("scaling", 8000000, 0x80DF1CB2,
test_composite, argc, argv);
}