author    marha <marha@users.sourceforge.net>  2011-07-01 14:21:21 +0200
committer marha <marha@users.sourceforge.net>  2011-07-01 14:21:21 +0200
commit    d9f970a847e1af706f07560ef163b229bb592307 (patch)
tree      2fe2204f673487f3a8d7150f14cc456c6eb48d62 /mesalib/src/glsl
parent    0feab87a4300a3e204e259d14a0a63e58e4a3c8f (diff)
xwininfo libX11 mesa mkfontscale xserver xkeyboard-config git update 1 July 2011
Diffstat (limited to 'mesalib/src/glsl')
-rw-r--r--  mesalib/src/glsl/ast_function.cpp              35
-rw-r--r--  mesalib/src/glsl/ast_to_hir.cpp                16
-rw-r--r--  mesalib/src/glsl/glsl_parser_extras.cpp       327
-rw-r--r--  mesalib/src/glsl/glsl_parser_extras.h          44
-rw-r--r--  mesalib/src/glsl/ir.cpp                         8
-rw-r--r--  mesalib/src/glsl/ir.h                           4
-rw-r--r--  mesalib/src/glsl/ir_constant_expression.cpp    13
-rw-r--r--  mesalib/src/glsl/ir_function.cpp                9
-rw-r--r--  mesalib/src/glsl/ir_validate.cpp               14
-rw-r--r--  mesalib/src/glsl/lower_if_to_cond_assign.cpp    4
-rw-r--r--  mesalib/src/glsl/lower_instructions.cpp       581
-rw-r--r--  mesalib/src/glsl/lower_mat_op_to_vec.cpp      298
12 files changed, 732 insertions, 621 deletions
diff --git a/mesalib/src/glsl/ast_function.cpp b/mesalib/src/glsl/ast_function.cpp
index 3ba699ad9..60a2c617f 100644
--- a/mesalib/src/glsl/ast_function.cpp
+++ b/mesalib/src/glsl/ast_function.cpp
@@ -271,17 +271,36 @@ convert_component(ir_rvalue *src, const glsl_type *desired_type)
assert(a <= GLSL_TYPE_BOOL);
assert(b <= GLSL_TYPE_BOOL);
- if ((a == b) || (src->type->is_integer() && desired_type->is_integer()))
+ if (a == b)
return src;
switch (a) {
case GLSL_TYPE_UINT:
+ switch (b) {
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2u, src);
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_i2u,
+ new(ctx) ir_expression(ir_unop_f2i, src));
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_i2u,
+ new(ctx) ir_expression(ir_unop_b2i, src));
+ break;
+ }
+ break;
case GLSL_TYPE_INT:
- if (b == GLSL_TYPE_FLOAT)
- result = new(ctx) ir_expression(ir_unop_f2i, desired_type, src, NULL);
- else {
- assert(b == GLSL_TYPE_BOOL);
- result = new(ctx) ir_expression(ir_unop_b2i, desired_type, src, NULL);
+ switch (b) {
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_u2i, src);
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2i, src);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_b2i, src);
+ break;
}
break;
case GLSL_TYPE_FLOAT:
@@ -300,6 +319,9 @@ convert_component(ir_rvalue *src, const glsl_type *desired_type)
case GLSL_TYPE_BOOL:
switch (b) {
case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_i2b,
+ new(ctx) ir_expression(ir_unop_u2i, src));
+ break;
case GLSL_TYPE_INT:
result = new(ctx) ir_expression(ir_unop_i2b, desired_type, src, NULL);
break;
@@ -311,6 +333,7 @@ convert_component(ir_rvalue *src, const glsl_type *desired_type)
}
assert(result != NULL);
+ assert(result->type == desired_type);
/* Try constant folding; it may fold in the conversion we just added. */
ir_constant *const constant = result->constant_expression_value();
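
The hunk above routes every conversion to or from unsigned through a signed-integer intermediate, because this commit only adds ir_unop_i2u and ir_unop_u2i to the IR. A minimal standalone sketch (not Mesa code) of the equivalent scalar arithmetic:

    // float-to-uint is built as i2u(f2i(x)), bool-to-uint as i2u(b2i(x)),
    // uint-to-bool as i2b(u2i(x)); plain C++ casts model the same chains.
    #include <cstdio>

    int main()
    {
       float f = 3.7f;
       bool  b = true;
       unsigned u = 5u;

       unsigned uint_from_float = (unsigned)(int)f;   // i2u(f2i(f))  -> 3
       unsigned uint_from_bool  = (unsigned)(int)b;   // i2u(b2i(b))  -> 1
       bool     bool_from_uint  = (int)u != 0;        // i2b(u2i(u))  -> true

       std::printf("%u %u %d\n", uint_from_float, uint_from_bool, bool_from_uint);
       return 0;
    }
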
diff --git a/mesalib/src/glsl/ast_to_hir.cpp b/mesalib/src/glsl/ast_to_hir.cpp
index 3b87f0d56..2e54e8c22 100644
--- a/mesalib/src/glsl/ast_to_hir.cpp
+++ b/mesalib/src/glsl/ast_to_hir.cpp
@@ -447,9 +447,17 @@ modulus_result_type(const struct glsl_type *type_a,
* integer vectors. The operand types must both be signed or both be
* unsigned."
*/
- if (!type_a->is_integer() || !type_b->is_integer()
- || (type_a->base_type != type_b->base_type)) {
- _mesa_glsl_error(loc, state, "type mismatch");
+ if (!type_a->is_integer()) {
+ _mesa_glsl_error(loc, state, "LHS of operator %% must be an integer.");
+ return glsl_type::error_type;
+ }
+ if (!type_b->is_integer()) {
+ _mesa_glsl_error(loc, state, "RHS of operator %% must be an integer.");
+ return glsl_type::error_type;
+ }
+ if (type_a->base_type != type_b->base_type) {
+ _mesa_glsl_error(loc, state,
+ "operands of %% must have the same base type");
return glsl_type::error_type;
}
@@ -1940,7 +1948,7 @@ apply_type_qualifier_to_variable(const struct ast_type_qualifier *qual,
break;
case fragment_shader:
- if (!global_scope || (var->mode != ir_var_in)) {
+ if (!global_scope || (var->mode != ir_var_out)) {
fail = true;
string = "output";
}
diff --git a/mesalib/src/glsl/glsl_parser_extras.cpp b/mesalib/src/glsl/glsl_parser_extras.cpp
index d9aa300bb..cc781378d 100644
--- a/mesalib/src/glsl/glsl_parser_extras.cpp
+++ b/mesalib/src/glsl/glsl_parser_extras.cpp
@@ -165,133 +165,242 @@ _mesa_glsl_warning(const YYLTYPE *locp, _mesa_glsl_parse_state *state,
}
+/**
+ * Enum representing the possible behaviors that can be specified in
+ * an #extension directive.
+ */
+enum ext_behavior {
+ extension_disable,
+ extension_enable,
+ extension_require,
+ extension_warn
+};
+
+/**
+ * Element type for _mesa_glsl_supported_extensions
+ */
+struct _mesa_glsl_extension {
+ /**
+ * Name of the extension when referred to in a GLSL extension
+ * statement
+ */
+ const char *name;
+
+ /** True if this extension is available to vertex shaders */
+ bool avail_in_VS;
+
+ /** True if this extension is available to geometry shaders */
+ bool avail_in_GS;
+
+ /** True if this extension is available to fragment shaders */
+ bool avail_in_FS;
+
+ /** True if this extension is available to desktop GL shaders */
+ bool avail_in_GL;
+
+ /** True if this extension is available to GLES shaders */
+ bool avail_in_ES;
+
+ /**
+ * Flag in the gl_extensions struct indicating whether this
+ * extension is supported by the driver, or
+ * &gl_extensions::dummy_true if supported by all drivers.
+ *
+ * Note: the type (GLboolean gl_extensions::*) is a "pointer to
+ * member" type, the type-safe alternative to the "offsetof" macro.
+ * In a nutshell:
+ *
+ * - foo bar::* p declares p to be an "offset" to a field of type
+ * foo that exists within struct bar
+ * - &bar::baz computes the "offset" of field baz within struct bar
+ * - x.*p accesses the field of x that exists at "offset" p
+ * - x->*p is equivalent to (*x).*p
+ */
+ const GLboolean gl_extensions::* supported_flag;
+
+ /**
+ * Flag in the _mesa_glsl_parse_state struct that should be set
+ * when this extension is enabled.
+ *
+ * See note in _mesa_glsl_extension::supported_flag about "pointer
+ * to member" types.
+ */
+ bool _mesa_glsl_parse_state::* enable_flag;
+
+ /**
+ * Flag in the _mesa_glsl_parse_state struct that should be set
+ * when the shader requests "warn" behavior for this extension.
+ *
+ * See note in _mesa_glsl_extension::supported_flag about "pointer
+ * to member" types.
+ */
+ bool _mesa_glsl_parse_state::* warn_flag;
+
+
+ bool compatible_with_state(const _mesa_glsl_parse_state *state) const;
+ void set_flags(_mesa_glsl_parse_state *state, ext_behavior behavior) const;
+};
+
+#define EXT(NAME, VS, GS, FS, GL, ES, SUPPORTED_FLAG) \
+ { "GL_" #NAME, VS, GS, FS, GL, ES, &gl_extensions::SUPPORTED_FLAG, \
+ &_mesa_glsl_parse_state::NAME##_enable, \
+ &_mesa_glsl_parse_state::NAME##_warn }
+
+/**
+ * Table of extensions that can be enabled/disabled within a shader,
+ * and the conditions under which they are supported.
+ */
+static const _mesa_glsl_extension _mesa_glsl_supported_extensions[] = {
+ /* target availability API availability */
+ /* name VS GS FS GL ES supported flag */
+ EXT(ARB_draw_buffers, false, false, true, true, false, dummy_true),
+ EXT(ARB_draw_instanced, true, false, false, true, false, ARB_draw_instanced),
+ EXT(ARB_explicit_attrib_location, true, false, true, true, false, ARB_explicit_attrib_location),
+ EXT(ARB_fragment_coord_conventions, true, false, true, true, false, ARB_fragment_coord_conventions),
+ EXT(ARB_texture_rectangle, true, false, true, true, false, dummy_true),
+ EXT(EXT_texture_array, true, false, true, true, false, EXT_texture_array),
+ EXT(ARB_shader_texture_lod, true, false, true, true, false, ARB_shader_texture_lod),
+ EXT(ARB_shader_stencil_export, false, false, true, true, false, ARB_shader_stencil_export),
+ EXT(AMD_conservative_depth, true, false, true, true, false, AMD_conservative_depth),
+ EXT(AMD_shader_stencil_export, false, false, true, true, false, ARB_shader_stencil_export),
+ EXT(OES_texture_3D, true, false, true, false, true, EXT_texture3D),
+};
+
+#undef EXT
+
+
+/**
+ * Determine whether a given extension is compatible with the target,
+ * API, and extension information in the current parser state.
+ */
+bool _mesa_glsl_extension::compatible_with_state(const _mesa_glsl_parse_state *
+ state) const
+{
+ /* Check that this extension matches the type of shader we are
+ * compiling to.
+ */
+ switch (state->target) {
+ case vertex_shader:
+ if (!this->avail_in_VS) {
+ return false;
+ }
+ break;
+ case geometry_shader:
+ if (!this->avail_in_GS) {
+ return false;
+ }
+ break;
+ case fragment_shader:
+ if (!this->avail_in_FS) {
+ return false;
+ }
+ break;
+ default:
+ assert (!"Unrecognized shader target");
+ return false;
+ }
+
+ /* Check that this extension matches whether we are compiling
+ * for desktop GL or GLES.
+ */
+ if (state->es_shader) {
+ if (!this->avail_in_ES) return false;
+ } else {
+ if (!this->avail_in_GL) return false;
+ }
+
+ /* Check that this extension is supported by the OpenGL
+ * implementation.
+ *
+ * Note: the ->* operator indexes into state->extensions by the
+ * offset this->supported_flag. See
+ * _mesa_glsl_extension::supported_flag for more info.
+ */
+ return state->extensions->*(this->supported_flag);
+}
+
+/**
+ * Set the appropriate flags in the parser state to establish the
+ * given behavior for this extension.
+ */
+void _mesa_glsl_extension::set_flags(_mesa_glsl_parse_state *state,
+ ext_behavior behavior) const
+{
+ /* Note: the ->* operator indexes into state by the
+ * offsets this->enable_flag and this->warn_flag. See
+ * _mesa_glsl_extension::supported_flag for more info.
+ */
+ state->*(this->enable_flag) = (behavior != extension_disable);
+ state->*(this->warn_flag) = (behavior == extension_warn);
+}
+
+/**
+ * Find an extension by name in _mesa_glsl_supported_extensions. If
+ * the name is not found, return NULL.
+ */
+static const _mesa_glsl_extension *find_extension(const char *name)
+{
+ for (unsigned i = 0; i < Elements(_mesa_glsl_supported_extensions); ++i) {
+ if (strcmp(name, _mesa_glsl_supported_extensions[i].name) == 0) {
+ return &_mesa_glsl_supported_extensions[i];
+ }
+ }
+ return NULL;
+}
+
+
bool
_mesa_glsl_process_extension(const char *name, YYLTYPE *name_locp,
- const char *behavior, YYLTYPE *behavior_locp,
+ const char *behavior_string, YYLTYPE *behavior_locp,
_mesa_glsl_parse_state *state)
{
- enum {
- extension_disable,
- extension_enable,
- extension_require,
- extension_warn
- } ext_mode;
-
- if (strcmp(behavior, "warn") == 0) {
- ext_mode = extension_warn;
- } else if (strcmp(behavior, "require") == 0) {
- ext_mode = extension_require;
- } else if (strcmp(behavior, "enable") == 0) {
- ext_mode = extension_enable;
- } else if (strcmp(behavior, "disable") == 0) {
- ext_mode = extension_disable;
+ ext_behavior behavior;
+ if (strcmp(behavior_string, "warn") == 0) {
+ behavior = extension_warn;
+ } else if (strcmp(behavior_string, "require") == 0) {
+ behavior = extension_require;
+ } else if (strcmp(behavior_string, "enable") == 0) {
+ behavior = extension_enable;
+ } else if (strcmp(behavior_string, "disable") == 0) {
+ behavior = extension_disable;
} else {
_mesa_glsl_error(behavior_locp, state,
"Unknown extension behavior `%s'",
- behavior);
+ behavior_string);
return false;
}
- bool unsupported = false;
-
if (strcmp(name, "all") == 0) {
- if ((ext_mode == extension_enable) || (ext_mode == extension_require)) {
+ if ((behavior == extension_enable) || (behavior == extension_require)) {
_mesa_glsl_error(name_locp, state, "Cannot %s all extensions",
- (ext_mode == extension_enable)
+ (behavior == extension_enable)
? "enable" : "require");
return false;
- }
- } else if (strcmp(name, "GL_ARB_draw_buffers") == 0) {
- /* This extension is only supported in fragment shaders.
- */
- if (state->target != fragment_shader) {
- unsupported = true;
} else {
- state->ARB_draw_buffers_enable = (ext_mode != extension_disable);
- state->ARB_draw_buffers_warn = (ext_mode == extension_warn);
+ for (unsigned i = 0;
+ i < Elements(_mesa_glsl_supported_extensions); ++i) {
+ const _mesa_glsl_extension *extension
+ = &_mesa_glsl_supported_extensions[i];
+ if (extension->compatible_with_state(state)) {
+ _mesa_glsl_supported_extensions[i].set_flags(state, behavior);
+ }
+ }
}
- } else if (strcmp(name, "GL_ARB_draw_instanced") == 0) {
- state->ARB_draw_instanced_enable = (ext_mode != extension_disable);
- state->ARB_draw_instanced_warn = (ext_mode == extension_warn);
-
- /* This extension is only supported in vertex shaders.
- */
- unsupported = (state->target != vertex_shader)
- || !state->extensions->ARB_draw_instanced;
- } else if (strcmp(name, "GL_ARB_explicit_attrib_location") == 0) {
- state->ARB_explicit_attrib_location_enable =
- (ext_mode != extension_disable);
- state->ARB_explicit_attrib_location_warn =
- (ext_mode == extension_warn);
-
- unsupported = !state->extensions->ARB_explicit_attrib_location;
- } else if (strcmp(name, "GL_ARB_fragment_coord_conventions") == 0) {
- state->ARB_fragment_coord_conventions_enable =
- (ext_mode != extension_disable);
- state->ARB_fragment_coord_conventions_warn =
- (ext_mode == extension_warn);
-
- unsupported = !state->extensions->ARB_fragment_coord_conventions;
- } else if (strcmp(name, "GL_ARB_texture_rectangle") == 0) {
- state->ARB_texture_rectangle_enable = (ext_mode != extension_disable);
- state->ARB_texture_rectangle_warn = (ext_mode == extension_warn);
- } else if (strcmp(name, "GL_EXT_texture_array") == 0) {
- state->EXT_texture_array_enable = (ext_mode != extension_disable);
- state->EXT_texture_array_warn = (ext_mode == extension_warn);
-
- unsupported = !state->extensions->EXT_texture_array;
- } else if (strcmp(name, "GL_ARB_shader_texture_lod") == 0) {
- /* Force ARB_texture_rectangle to be on so sampler2DRects are defined */
- state->ARB_texture_rectangle_enable = true;
-
- state->ARB_shader_texture_lod_enable = (ext_mode != extension_disable);
- state->ARB_shader_texture_lod_warn = (ext_mode == extension_warn);
-
- unsupported = !state->extensions->ARB_shader_texture_lod;
- } else if (strcmp(name, "GL_ARB_shader_stencil_export") == 0) {
- state->ARB_shader_stencil_export_enable = (ext_mode != extension_disable);
- state->ARB_shader_stencil_export_warn = (ext_mode == extension_warn);
-
- /* This extension is only supported in fragment shaders.
- */
- unsupported = (state->target != fragment_shader)
- || !state->extensions->ARB_shader_stencil_export;
- } else if (strcmp(name, "GL_AMD_conservative_depth") == 0) {
- /* The AMD_conservative spec does not forbid requiring the extension in
- * the vertex shader.
- */
- state->AMD_conservative_depth_enable = (ext_mode != extension_disable);
- state->AMD_conservative_depth_warn = (ext_mode == extension_warn);
- unsupported = !state->extensions->AMD_conservative_depth;
- } else if (strcmp(name, "GL_AMD_shader_stencil_export") == 0) {
- state->AMD_shader_stencil_export_enable = (ext_mode != extension_disable);
- state->AMD_shader_stencil_export_warn = (ext_mode == extension_warn);
-
- /* This extension is only supported in fragment shaders.
- * Both the ARB and AMD variants share the same ARB flag
- * in gl_extensions.
- */
- unsupported = (state->target != fragment_shader)
- || !state->extensions->ARB_shader_stencil_export;
- } else if (strcmp(name, "GL_OES_texture_3D") == 0 && state->es_shader) {
- state->OES_texture_3D_enable = (ext_mode != extension_disable);
- state->OES_texture_3D_warn = (ext_mode == extension_warn);
-
- unsupported = !state->extensions->EXT_texture3D;
} else {
- unsupported = true;
- }
-
- if (unsupported) {
- static const char *const fmt = "extension `%s' unsupported in %s shader";
-
- if (ext_mode == extension_require) {
- _mesa_glsl_error(name_locp, state, fmt,
- name, _mesa_glsl_shader_target_name(state->target));
- return false;
+ const _mesa_glsl_extension *extension = find_extension(name);
+ if (extension && extension->compatible_with_state(state)) {
+ extension->set_flags(state, behavior);
} else {
- _mesa_glsl_warning(name_locp, state, fmt,
- name, _mesa_glsl_shader_target_name(state->target));
+ static const char *const fmt = "extension `%s' unsupported in %s shader";
+
+ if (behavior == extension_require) {
+ _mesa_glsl_error(name_locp, state, fmt,
+ name, _mesa_glsl_shader_target_name(state->target));
+ return false;
+ } else {
+ _mesa_glsl_warning(name_locp, state, fmt,
+ name, _mesa_glsl_shader_target_name(state->target));
+ }
}
}
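
The new extension table works by storing "pointer to member" offsets, so one generic code path can flip the per-extension flags that the old if/else chain set by hand. A minimal standalone sketch (not Mesa code) of that pattern, with hypothetical struct and field names:

    #include <cstdio>

    struct parse_state {
       bool ARB_draw_buffers_enable;
       bool ARB_draw_buffers_warn;
    };

    struct extension_entry {
       const char *name;
       bool parse_state::*enable_flag;   // "offset" of the field to set
       bool parse_state::*warn_flag;
    };

    static const extension_entry table[] = {
       { "GL_ARB_draw_buffers",
         &parse_state::ARB_draw_buffers_enable,
         &parse_state::ARB_draw_buffers_warn },
    };

    int main()
    {
       parse_state state = {};
       const extension_entry &ext = table[0];

       // Same effect as state.ARB_draw_buffers_enable = true, but the field
       // is selected at run time through the stored member pointer (.*).
       state.*(ext.enable_flag) = true;
       state.*(ext.warn_flag)   = false;

       std::printf("%s: enable=%d warn=%d\n", ext.name,
                   state.ARB_draw_buffers_enable, state.ARB_draw_buffers_warn);
       return 0;
    }
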
diff --git a/mesalib/src/glsl/glsl_parser_extras.h b/mesalib/src/glsl/glsl_parser_extras.h
index 878d2ae3f..2f4d3cba7 100644
--- a/mesalib/src/glsl/glsl_parser_extras.h
+++ b/mesalib/src/glsl/glsl_parser_extras.h
@@ -156,28 +156,28 @@ struct _mesa_glsl_parse_state {
* \name Enable bits for GLSL extensions
*/
/*@{*/
- unsigned ARB_draw_buffers_enable:1;
- unsigned ARB_draw_buffers_warn:1;
- unsigned ARB_draw_instanced_enable:1;
- unsigned ARB_draw_instanced_warn:1;
- unsigned ARB_explicit_attrib_location_enable:1;
- unsigned ARB_explicit_attrib_location_warn:1;
- unsigned ARB_fragment_coord_conventions_enable:1;
- unsigned ARB_fragment_coord_conventions_warn:1;
- unsigned ARB_texture_rectangle_enable:1;
- unsigned ARB_texture_rectangle_warn:1;
- unsigned EXT_texture_array_enable:1;
- unsigned EXT_texture_array_warn:1;
- unsigned ARB_shader_texture_lod_enable:1;
- unsigned ARB_shader_texture_lod_warn:1;
- unsigned ARB_shader_stencil_export_enable:1;
- unsigned ARB_shader_stencil_export_warn:1;
- unsigned AMD_conservative_depth_enable:1;
- unsigned AMD_conservative_depth_warn:1;
- unsigned AMD_shader_stencil_export_enable:1;
- unsigned AMD_shader_stencil_export_warn:1;
- unsigned OES_texture_3D_enable:1;
- unsigned OES_texture_3D_warn:1;
+ bool ARB_draw_buffers_enable;
+ bool ARB_draw_buffers_warn;
+ bool ARB_draw_instanced_enable;
+ bool ARB_draw_instanced_warn;
+ bool ARB_explicit_attrib_location_enable;
+ bool ARB_explicit_attrib_location_warn;
+ bool ARB_fragment_coord_conventions_enable;
+ bool ARB_fragment_coord_conventions_warn;
+ bool ARB_texture_rectangle_enable;
+ bool ARB_texture_rectangle_warn;
+ bool EXT_texture_array_enable;
+ bool EXT_texture_array_warn;
+ bool ARB_shader_texture_lod_enable;
+ bool ARB_shader_texture_lod_warn;
+ bool ARB_shader_stencil_export_enable;
+ bool ARB_shader_stencil_export_warn;
+ bool AMD_conservative_depth_enable;
+ bool AMD_conservative_depth_warn;
+ bool AMD_shader_stencil_export_enable;
+ bool AMD_shader_stencil_export_warn;
+ bool OES_texture_3D_enable;
+ bool OES_texture_3D_warn;
/*@}*/
/** Extensions supported by the OpenGL implementation. */
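
The flags change from :1 bit-fields to plain bools, presumably because the new extension table needs pointer-to-member references to them and C++ cannot form a pointer to a bit-field member. A tiny illustration (not Mesa code, hypothetical struct names):

    struct flags_bitfield { unsigned enable:1; };
    struct flags_bool     { bool enable; };

    // unsigned flags_bitfield::*p1 = &flags_bitfield::enable;  // error: bit-field
    bool flags_bool::*p2 = &flags_bool::enable;                 // OK

    int main() { flags_bool f = {}; f.*p2 = true; return f.enable ? 0 : 1; }
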
diff --git a/mesalib/src/glsl/ir.cpp b/mesalib/src/glsl/ir.cpp
index a3623b31e..95689dc10 100644
--- a/mesalib/src/glsl/ir.cpp
+++ b/mesalib/src/glsl/ir.cpp
@@ -272,6 +272,7 @@ ir_expression::ir_expression(int op, ir_rvalue *op0)
case ir_unop_f2i:
case ir_unop_b2i:
+ case ir_unop_u2i:
this->type = glsl_type::get_instance(GLSL_TYPE_INT,
op0->type->vector_elements, 1);
break;
@@ -289,6 +290,11 @@ ir_expression::ir_expression(int op, ir_rvalue *op0)
op0->type->vector_elements, 1);
break;
+ case ir_unop_i2u:
+ this->type = glsl_type::get_instance(GLSL_TYPE_UINT,
+ op0->type->vector_elements, 1);
+ break;
+
case ir_unop_noise:
this->type = glsl_type::float_type;
break;
@@ -419,6 +425,8 @@ static const char *const operator_strs[] = {
"i2b",
"b2i",
"u2f",
+ "i2u",
+ "u2i",
"any",
"trunc",
"ceil",
diff --git a/mesalib/src/glsl/ir.h b/mesalib/src/glsl/ir.h
index a41984310..42a393697 100644
--- a/mesalib/src/glsl/ir.h
+++ b/mesalib/src/glsl/ir.h
@@ -682,7 +682,7 @@ public:
class ir_assignment : public ir_instruction {
public:
- ir_assignment(ir_rvalue *lhs, ir_rvalue *rhs, ir_rvalue *condition);
+ ir_assignment(ir_rvalue *lhs, ir_rvalue *rhs, ir_rvalue *condition = NULL);
/**
* Construct an assignment with an explicit write mask
@@ -789,6 +789,8 @@ enum ir_expression_operation {
ir_unop_i2b, /**< int-to-boolean conversion */
ir_unop_b2i, /**< Boolean-to-int conversion */
ir_unop_u2f, /**< Unsigned-to-float conversion. */
+ ir_unop_i2u, /**< Integer-to-unsigned conversion. */
+ ir_unop_u2i, /**< Unsigned-to-integer conversion. */
ir_unop_any,
/**
diff --git a/mesalib/src/glsl/ir_constant_expression.cpp b/mesalib/src/glsl/ir_constant_expression.cpp
index 2a3084896..f0299a2c4 100644
--- a/mesalib/src/glsl/ir_constant_expression.cpp
+++ b/mesalib/src/glsl/ir_constant_expression.cpp
@@ -166,7 +166,18 @@ ir_expression::constant_expression_value()
data.b[c] = op[0]->value.u[c] ? true : false;
}
break;
-
+ case ir_unop_u2i:
+ assert(op[0]->type->base_type == GLSL_TYPE_UINT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.i[c] = op[0]->value.u[c];
+ }
+ break;
+ case ir_unop_i2u:
+ assert(op[0]->type->base_type == GLSL_TYPE_INT);
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ data.u[c] = op[0]->value.i[c];
+ }
+ break;
case ir_unop_any:
assert(op[0]->type->is_boolean());
data.b[0] = false;
diff --git a/mesalib/src/glsl/ir_function.cpp b/mesalib/src/glsl/ir_function.cpp
index caee9296a..ef8d4fcfc 100644
--- a/mesalib/src/glsl/ir_function.cpp
+++ b/mesalib/src/glsl/ir_function.cpp
@@ -165,6 +165,7 @@ ir_function_signature *
ir_function::matching_signature(const exec_list *actual_parameters)
{
ir_function_signature *match = NULL;
+ int matched_score;
foreach_iter(exec_list_iterator, iter, signatures) {
ir_function_signature *const sig =
@@ -173,14 +174,14 @@ ir_function::matching_signature(const exec_list *actual_parameters)
const int score = parameter_lists_match(& sig->parameters,
actual_parameters);
+ /* If we found an exact match, simply return it */
if (score == 0)
return sig;
- if (score > 0) {
- if (match != NULL)
- return NULL;
-
+ /* If we found a match with fewer conversions, use that instead */
+ if (score > 0 && (match == NULL || score < matched_score)) {
match = sig;
+ matched_score = score;
}
}
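
The matching_signature() change above keeps the candidate with the lowest positive score (fewest implicit conversions) instead of giving up as soon as two inexact matches exist; an exact match (score 0) still returns immediately. A minimal standalone sketch (not Mesa code) of that selection loop, with hypothetical names and precomputed scores:

    #include <cstdio>
    #include <vector>

    // score: 0 = exact match, > 0 = needs that many conversions, < 0 = no match
    struct signature { const char *name; int score; };

    static const signature *pick_best(const std::vector<signature> &sigs)
    {
       const signature *best = nullptr;
       int best_score = 0;

       for (const signature &sig : sigs) {
          if (sig.score == 0)
             return &sig;                 // exact match wins immediately
          if (sig.score > 0 && (best == nullptr || sig.score < best_score)) {
             best = &sig;                 // fewer conversions than current best
             best_score = sig.score;
          }
       }
       return best;
    }

    int main()
    {
       std::vector<signature> sigs = { {"f(float)", 2}, {"f(uint)", 1}, {"f(bool)", -1} };
       const signature *best = pick_best(sigs);
       std::printf("chose %s\n", best ? best->name : "none");
       return 0;
    }
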
diff --git a/mesalib/src/glsl/ir_validate.cpp b/mesalib/src/glsl/ir_validate.cpp
index 7b1c19d65..f3fceb2a5 100644
--- a/mesalib/src/glsl/ir_validate.cpp
+++ b/mesalib/src/glsl/ir_validate.cpp
@@ -254,7 +254,7 @@ ir_validate::visit_leave(ir_expression *ir)
case ir_unop_f2i:
assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
- assert(ir->type->is_integer());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
break;
case ir_unop_i2f:
assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
@@ -269,17 +269,25 @@ ir_validate::visit_leave(ir_expression *ir)
assert(ir->type->base_type == GLSL_TYPE_FLOAT);
break;
case ir_unop_i2b:
- assert(ir->operands[0]->type->is_integer());
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
assert(ir->type->base_type == GLSL_TYPE_BOOL);
break;
case ir_unop_b2i:
assert(ir->operands[0]->type->base_type == GLSL_TYPE_BOOL);
- assert(ir->type->is_integer());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
break;
case ir_unop_u2f:
assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
assert(ir->type->base_type == GLSL_TYPE_FLOAT);
break;
+ case ir_unop_i2u:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+ case ir_unop_u2i:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
case ir_unop_any:
assert(ir->operands[0]->type->base_type == GLSL_TYPE_BOOL);
diff --git a/mesalib/src/glsl/lower_if_to_cond_assign.cpp b/mesalib/src/glsl/lower_if_to_cond_assign.cpp
index e3a1065d9..b637eb4fe 100644
--- a/mesalib/src/glsl/lower_if_to_cond_assign.cpp
+++ b/mesalib/src/glsl/lower_if_to_cond_assign.cpp
@@ -149,11 +149,9 @@ ir_visitor_status
ir_if_to_cond_assign_visitor::visit_leave(ir_if *ir)
{
/* Only flatten when beyond the GPU's maximum supported nesting depth. */
- if (this->depth <= this->max_depth)
+ if (this->depth-- <= this->max_depth)
return visit_continue;
- this->depth--;
-
bool found_control_flow = false;
ir_variable *cond_var;
ir_assignment *assign;
diff --git a/mesalib/src/glsl/lower_instructions.cpp b/mesalib/src/glsl/lower_instructions.cpp
index 6e44d1319..806f86399 100644
--- a/mesalib/src/glsl/lower_instructions.cpp
+++ b/mesalib/src/glsl/lower_instructions.cpp
@@ -1,288 +1,293 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * \file lower_instructions.cpp
- *
- * Many GPUs lack native instructions for certain expression operations, and
- * must replace them with some other expression tree. This pass lowers some
- * of the most common cases, allowing the lowering code to be implemented once
- * rather than in each driver backend.
- *
- * Currently supported transformations:
- * - SUB_TO_ADD_NEG
- * - DIV_TO_MUL_RCP
- * - EXP_TO_EXP2
- * - POW_TO_EXP2
- * - LOG_TO_LOG2
- * - MOD_TO_FRACT
- *
- * SUB_TO_ADD_NEG:
- * ---------------
- * Breaks an ir_binop_sub expression down to add(op0, neg(op1))
- *
- * This simplifies expression reassociation, and for many backends
- * there is no subtract operation separate from adding the negation.
- * For backends with native subtract operations, they will probably
- * want to recognize add(op0, neg(op1)) or the other way around to
- * produce a subtract anyway.
- *
- * DIV_TO_MUL_RCP:
- * ---------------
- * Breaks an ir_unop_div expression down to op0 * (rcp(op1)).
- *
- * Many GPUs don't have a divide instruction (945 and 965 included),
- * but they do have an RCP instruction to compute an approximate
- * reciprocal. By breaking the operation down, constant reciprocals
- * can get constant folded.
- *
- * EXP_TO_EXP2 and LOG_TO_LOG2:
- * ----------------------------
- * Many GPUs don't have a base e log or exponent instruction, but they
- * do have base 2 versions, so this pass converts exp and log to exp2
- * and log2 operations.
- *
- * POW_TO_EXP2:
- * -----------
- * Many older GPUs don't have an x**y instruction. For these GPUs, convert
- * x**y to 2**(y * log2(x)).
- *
- * MOD_TO_FRACT:
- * -------------
- * Breaks an ir_unop_mod expression down to (op1 * fract(op0 / op1))
- *
- * Many GPUs don't have a MOD instruction (945 and 965 included), and
- * if we have to break it down like this anyway, it gives an
- * opportunity to do things like constant fold the (1.0 / op1) easily.
- */
-
-#include "main/core.h" /* for M_LOG2E */
-#include "glsl_types.h"
-#include "ir.h"
-#include "ir_optimization.h"
-
-class lower_instructions_visitor : public ir_hierarchical_visitor {
-public:
- lower_instructions_visitor(unsigned lower)
- : progress(false), lower(lower) { }
-
- ir_visitor_status visit_leave(ir_expression *);
-
- bool progress;
-
-private:
- unsigned lower; /** Bitfield of which operations to lower */
-
- void sub_to_add_neg(ir_expression *);
- void div_to_mul_rcp(ir_expression *);
- void mod_to_fract(ir_expression *);
- void exp_to_exp2(ir_expression *);
- void pow_to_exp2(ir_expression *);
- void log_to_log2(ir_expression *);
-};
-
-/**
- * Determine if a particular type of lowering should occur
- */
-#define lowering(x) (this->lower & x)
-
-bool
-lower_instructions(exec_list *instructions, unsigned what_to_lower)
-{
- lower_instructions_visitor v(what_to_lower);
-
- visit_list_elements(&v, instructions);
- return v.progress;
-}
-
-void
-lower_instructions_visitor::sub_to_add_neg(ir_expression *ir)
-{
- ir->operation = ir_binop_add;
- ir->operands[1] = new(ir) ir_expression(ir_unop_neg, ir->operands[1]->type,
- ir->operands[1], NULL);
- this->progress = true;
-}
-
-void
-lower_instructions_visitor::div_to_mul_rcp(ir_expression *ir)
-{
- if (!ir->operands[1]->type->is_integer()) {
- /* New expression for the 1.0 / op1 */
- ir_rvalue *expr;
- expr = new(ir) ir_expression(ir_unop_rcp,
- ir->operands[1]->type,
- ir->operands[1],
- NULL);
-
- /* op0 / op1 -> op0 * (1.0 / op1) */
- ir->operation = ir_binop_mul;
- ir->operands[1] = expr;
- } else {
- /* Be careful with integer division -- we need to do it as a
- * float and re-truncate, since rcp(n > 1) of an integer would
- * just be 0.
- */
- ir_rvalue *op0, *op1;
- const struct glsl_type *vec_type;
-
- vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
- ir->operands[1]->type->vector_elements,
- ir->operands[1]->type->matrix_columns);
-
- if (ir->operands[1]->type->base_type == GLSL_TYPE_INT)
- op1 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[1], NULL);
- else
- op1 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[1], NULL);
-
- op1 = new(ir) ir_expression(ir_unop_rcp, op1->type, op1, NULL);
-
- vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
- ir->operands[0]->type->vector_elements,
- ir->operands[0]->type->matrix_columns);
-
- if (ir->operands[0]->type->base_type == GLSL_TYPE_INT)
- op0 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[0], NULL);
- else
- op0 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[0], NULL);
-
- op0 = new(ir) ir_expression(ir_binop_mul, vec_type, op0, op1);
-
- ir->operation = ir_unop_f2i;
- ir->operands[0] = op0;
- ir->operands[1] = NULL;
- }
-
- this->progress = true;
-}
-
-void
-lower_instructions_visitor::exp_to_exp2(ir_expression *ir)
-{
- ir_constant *log2_e = new(ir) ir_constant(float(M_LOG2E));
-
- ir->operation = ir_unop_exp2;
- ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[0]->type,
- ir->operands[0], log2_e);
- this->progress = true;
-}
-
-void
-lower_instructions_visitor::pow_to_exp2(ir_expression *ir)
-{
- ir_expression *const log2_x =
- new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
- ir->operands[0]);
-
- ir->operation = ir_unop_exp2;
- ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[1]->type,
- ir->operands[1], log2_x);
- ir->operands[1] = NULL;
- this->progress = true;
-}
-
-void
-lower_instructions_visitor::log_to_log2(ir_expression *ir)
-{
- ir->operation = ir_binop_mul;
- ir->operands[0] = new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
- ir->operands[0], NULL);
- ir->operands[1] = new(ir) ir_constant(float(1.0 / M_LOG2E));
- this->progress = true;
-}
-
-void
-lower_instructions_visitor::mod_to_fract(ir_expression *ir)
-{
- ir_variable *temp = new(ir) ir_variable(ir->operands[1]->type, "mod_b",
- ir_var_temporary);
- this->base_ir->insert_before(temp);
-
- ir_assignment *const assign =
- new(ir) ir_assignment(new(ir) ir_dereference_variable(temp),
- ir->operands[1], NULL);
-
- this->base_ir->insert_before(assign);
-
- ir_expression *const div_expr =
- new(ir) ir_expression(ir_binop_div, ir->operands[0]->type,
- ir->operands[0],
- new(ir) ir_dereference_variable(temp));
-
- /* Don't generate new IR that would need to be lowered in an additional
- * pass.
- */
- if (lowering(DIV_TO_MUL_RCP))
- div_to_mul_rcp(div_expr);
-
- ir_rvalue *expr = new(ir) ir_expression(ir_unop_fract,
- ir->operands[0]->type,
- div_expr,
- NULL);
-
- ir->operation = ir_binop_mul;
- ir->operands[0] = new(ir) ir_dereference_variable(temp);
- ir->operands[1] = expr;
- this->progress = true;
-}
-
-ir_visitor_status
-lower_instructions_visitor::visit_leave(ir_expression *ir)
-{
- switch (ir->operation) {
- case ir_binop_sub:
- if (lowering(SUB_TO_ADD_NEG))
- sub_to_add_neg(ir);
- break;
-
- case ir_binop_div:
- if (lowering(DIV_TO_MUL_RCP))
- div_to_mul_rcp(ir);
- break;
-
- case ir_unop_exp:
- if (lowering(EXP_TO_EXP2))
- exp_to_exp2(ir);
- break;
-
- case ir_unop_log:
- if (lowering(LOG_TO_LOG2))
- log_to_log2(ir);
- break;
-
- case ir_binop_mod:
- if (lowering(MOD_TO_FRACT))
- mod_to_fract(ir);
- break;
-
- case ir_binop_pow:
- if (lowering(POW_TO_EXP2))
- pow_to_exp2(ir);
- break;
-
- default:
- return visit_continue;
- }
-
- return visit_continue;
-}
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_instructions.cpp
+ *
+ * Many GPUs lack native instructions for certain expression operations, and
+ * must replace them with some other expression tree. This pass lowers some
+ * of the most common cases, allowing the lowering code to be implemented once
+ * rather than in each driver backend.
+ *
+ * Currently supported transformations:
+ * - SUB_TO_ADD_NEG
+ * - DIV_TO_MUL_RCP
+ * - EXP_TO_EXP2
+ * - POW_TO_EXP2
+ * - LOG_TO_LOG2
+ * - MOD_TO_FRACT
+ *
+ * SUB_TO_ADD_NEG:
+ * ---------------
+ * Breaks an ir_binop_sub expression down to add(op0, neg(op1))
+ *
+ * This simplifies expression reassociation, and for many backends
+ * there is no subtract operation separate from adding the negation.
+ * For backends with native subtract operations, they will probably
+ * want to recognize add(op0, neg(op1)) or the other way around to
+ * produce a subtract anyway.
+ *
+ * DIV_TO_MUL_RCP:
+ * ---------------
+ * Breaks an ir_unop_div expression down to op0 * (rcp(op1)).
+ *
+ * Many GPUs don't have a divide instruction (945 and 965 included),
+ * but they do have an RCP instruction to compute an approximate
+ * reciprocal. By breaking the operation down, constant reciprocals
+ * can get constant folded.
+ *
+ * EXP_TO_EXP2 and LOG_TO_LOG2:
+ * ----------------------------
+ * Many GPUs don't have a base e log or exponent instruction, but they
+ * do have base 2 versions, so this pass converts exp and log to exp2
+ * and log2 operations.
+ *
+ * POW_TO_EXP2:
+ * -----------
+ * Many older GPUs don't have an x**y instruction. For these GPUs, convert
+ * x**y to 2**(y * log2(x)).
+ *
+ * MOD_TO_FRACT:
+ * -------------
+ * Breaks an ir_unop_mod expression down to (op1 * fract(op0 / op1))
+ *
+ * Many GPUs don't have a MOD instruction (945 and 965 included), and
+ * if we have to break it down like this anyway, it gives an
+ * opportunity to do things like constant fold the (1.0 / op1) easily.
+ */
+
+#include "main/core.h" /* for M_LOG2E */
+#include "glsl_types.h"
+#include "ir.h"
+#include "ir_optimization.h"
+
+class lower_instructions_visitor : public ir_hierarchical_visitor {
+public:
+ lower_instructions_visitor(unsigned lower)
+ : progress(false), lower(lower) { }
+
+ ir_visitor_status visit_leave(ir_expression *);
+
+ bool progress;
+
+private:
+ unsigned lower; /** Bitfield of which operations to lower */
+
+ void sub_to_add_neg(ir_expression *);
+ void div_to_mul_rcp(ir_expression *);
+ void mod_to_fract(ir_expression *);
+ void exp_to_exp2(ir_expression *);
+ void pow_to_exp2(ir_expression *);
+ void log_to_log2(ir_expression *);
+};
+
+/**
+ * Determine if a particular type of lowering should occur
+ */
+#define lowering(x) (this->lower & x)
+
+bool
+lower_instructions(exec_list *instructions, unsigned what_to_lower)
+{
+ lower_instructions_visitor v(what_to_lower);
+
+ visit_list_elements(&v, instructions);
+ return v.progress;
+}
+
+void
+lower_instructions_visitor::sub_to_add_neg(ir_expression *ir)
+{
+ ir->operation = ir_binop_add;
+ ir->operands[1] = new(ir) ir_expression(ir_unop_neg, ir->operands[1]->type,
+ ir->operands[1], NULL);
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::div_to_mul_rcp(ir_expression *ir)
+{
+ if (!ir->operands[1]->type->is_integer()) {
+ /* New expression for the 1.0 / op1 */
+ ir_rvalue *expr;
+ expr = new(ir) ir_expression(ir_unop_rcp,
+ ir->operands[1]->type,
+ ir->operands[1],
+ NULL);
+
+ /* op0 / op1 -> op0 * (1.0 / op1) */
+ ir->operation = ir_binop_mul;
+ ir->operands[1] = expr;
+ } else {
+ /* Be careful with integer division -- we need to do it as a
+ * float and re-truncate, since rcp(n > 1) of an integer would
+ * just be 0.
+ */
+ ir_rvalue *op0, *op1;
+ const struct glsl_type *vec_type;
+
+ vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ ir->operands[1]->type->vector_elements,
+ ir->operands[1]->type->matrix_columns);
+
+ if (ir->operands[1]->type->base_type == GLSL_TYPE_INT)
+ op1 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[1], NULL);
+ else
+ op1 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[1], NULL);
+
+ op1 = new(ir) ir_expression(ir_unop_rcp, op1->type, op1, NULL);
+
+ vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ ir->operands[0]->type->vector_elements,
+ ir->operands[0]->type->matrix_columns);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_INT)
+ op0 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[0], NULL);
+ else
+ op0 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[0], NULL);
+
+ op0 = new(ir) ir_expression(ir_binop_mul, vec_type, op0, op1);
+
+ if (ir->operands[1]->type->base_type == GLSL_TYPE_INT) {
+ ir->operation = ir_unop_f2i;
+ ir->operands[0] = op0;
+ } else {
+ ir->operation = ir_unop_i2u;
+ ir->operands[0] = new(ir) ir_expression(ir_unop_f2i, op0);
+ }
+ ir->operands[1] = NULL;
+ }
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::exp_to_exp2(ir_expression *ir)
+{
+ ir_constant *log2_e = new(ir) ir_constant(float(M_LOG2E));
+
+ ir->operation = ir_unop_exp2;
+ ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[0]->type,
+ ir->operands[0], log2_e);
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::pow_to_exp2(ir_expression *ir)
+{
+ ir_expression *const log2_x =
+ new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
+ ir->operands[0]);
+
+ ir->operation = ir_unop_exp2;
+ ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[1]->type,
+ ir->operands[1], log2_x);
+ ir->operands[1] = NULL;
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::log_to_log2(ir_expression *ir)
+{
+ ir->operation = ir_binop_mul;
+ ir->operands[0] = new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
+ ir->operands[0], NULL);
+ ir->operands[1] = new(ir) ir_constant(float(1.0 / M_LOG2E));
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::mod_to_fract(ir_expression *ir)
+{
+ ir_variable *temp = new(ir) ir_variable(ir->operands[1]->type, "mod_b",
+ ir_var_temporary);
+ this->base_ir->insert_before(temp);
+
+ ir_assignment *const assign =
+ new(ir) ir_assignment(new(ir) ir_dereference_variable(temp),
+ ir->operands[1], NULL);
+
+ this->base_ir->insert_before(assign);
+
+ ir_expression *const div_expr =
+ new(ir) ir_expression(ir_binop_div, ir->operands[0]->type,
+ ir->operands[0],
+ new(ir) ir_dereference_variable(temp));
+
+ /* Don't generate new IR that would need to be lowered in an additional
+ * pass.
+ */
+ if (lowering(DIV_TO_MUL_RCP))
+ div_to_mul_rcp(div_expr);
+
+ ir_rvalue *expr = new(ir) ir_expression(ir_unop_fract,
+ ir->operands[0]->type,
+ div_expr,
+ NULL);
+
+ ir->operation = ir_binop_mul;
+ ir->operands[0] = new(ir) ir_dereference_variable(temp);
+ ir->operands[1] = expr;
+ this->progress = true;
+}
+
+ir_visitor_status
+lower_instructions_visitor::visit_leave(ir_expression *ir)
+{
+ switch (ir->operation) {
+ case ir_binop_sub:
+ if (lowering(SUB_TO_ADD_NEG))
+ sub_to_add_neg(ir);
+ break;
+
+ case ir_binop_div:
+ if (lowering(DIV_TO_MUL_RCP))
+ div_to_mul_rcp(ir);
+ break;
+
+ case ir_unop_exp:
+ if (lowering(EXP_TO_EXP2))
+ exp_to_exp2(ir);
+ break;
+
+ case ir_unop_log:
+ if (lowering(LOG_TO_LOG2))
+ log_to_log2(ir);
+ break;
+
+ case ir_binop_mod:
+ if (lowering(MOD_TO_FRACT) && ir->type->is_float())
+ mod_to_fract(ir);
+ break;
+
+ case ir_binop_pow:
+ if (lowering(POW_TO_EXP2))
+ pow_to_exp2(ir);
+ break;
+
+ default:
+ return visit_continue;
+ }
+
+ return visit_continue;
+}
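
The rewritten pass still relies on the float identities described in the file comment, mod(a, b) = b * fract(a / b) and a / b = a * (1.0 / b); integer division is now finished with f2i for signed results and i2u(f2i(...)) for unsigned ones. A quick standalone numeric check of the float identities (not Mesa code):

    #include <cmath>
    #include <cstdio>

    static float fract(float x) { return x - std::floor(x); }

    int main()
    {
       const float a = 7.25f, b = 2.0f;

       float mod_lowered = b * fract(a / b);   // what MOD_TO_FRACT emits
       float div_lowered = a * (1.0f / b);     // what DIV_TO_MUL_RCP emits

       std::printf("mod: %f vs %f\n", std::fmod(a, b), mod_lowered);
       std::printf("div: %f vs %f\n", a / b, div_lowered);
       return 0;
    }
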
diff --git a/mesalib/src/glsl/lower_mat_op_to_vec.cpp b/mesalib/src/glsl/lower_mat_op_to_vec.cpp
index 8cbbfa713..a371afc14 100644
--- a/mesalib/src/glsl/lower_mat_op_to_vec.cpp
+++ b/mesalib/src/glsl/lower_mat_op_to_vec.cpp
@@ -45,19 +45,19 @@ public:
ir_visitor_status visit_leave(ir_assignment *);
- ir_dereference *get_column(ir_variable *var, int col);
- ir_rvalue *get_element(ir_variable *var, int col, int row);
-
- void do_mul_mat_mat(ir_variable *result_var,
- ir_variable *a_var, ir_variable *b_var);
- void do_mul_mat_vec(ir_variable *result_var,
- ir_variable *a_var, ir_variable *b_var);
- void do_mul_vec_mat(ir_variable *result_var,
- ir_variable *a_var, ir_variable *b_var);
- void do_mul_mat_scalar(ir_variable *result_var,
- ir_variable *a_var, ir_variable *b_var);
- void do_equal_mat_mat(ir_variable *result_var, ir_variable *a_var,
- ir_variable *b_var, bool test_equal);
+ ir_dereference *get_column(ir_dereference *val, int col);
+ ir_rvalue *get_element(ir_dereference *val, int col, int row);
+
+ void do_mul_mat_mat(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_mul_mat_vec(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_mul_vec_mat(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_mul_mat_scalar(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_equal_mat_mat(ir_dereference *result, ir_dereference *a,
+ ir_dereference *b, bool test_equal);
void *mem_ctx;
bool made_progress;
@@ -97,182 +97,137 @@ do_mat_op_to_vec(exec_list *instructions)
}
ir_rvalue *
-ir_mat_op_to_vec_visitor::get_element(ir_variable *var, int col, int row)
+ir_mat_op_to_vec_visitor::get_element(ir_dereference *val, int col, int row)
{
- ir_dereference *deref;
+ val = get_column(val, col);
- deref = new(mem_ctx) ir_dereference_variable(var);
-
- if (var->type->is_matrix()) {
- deref = new(mem_ctx) ir_dereference_array(var,
- new(mem_ctx) ir_constant(col));
- } else {
- assert(col == 0);
- }
-
- return new(mem_ctx) ir_swizzle(deref, row, 0, 0, 0, 1);
+ return new(mem_ctx) ir_swizzle(val, row, 0, 0, 0, 1);
}
ir_dereference *
-ir_mat_op_to_vec_visitor::get_column(ir_variable *var, int row)
+ir_mat_op_to_vec_visitor::get_column(ir_dereference *val, int row)
{
- ir_dereference *deref;
-
- if (!var->type->is_matrix()) {
- deref = new(mem_ctx) ir_dereference_variable(var);
- } else {
- deref = new(mem_ctx) ir_dereference_variable(var);
- deref = new(mem_ctx) ir_dereference_array(deref,
- new(mem_ctx) ir_constant(row));
+ val = val->clone(mem_ctx, NULL);
+
+ if (val->type->is_matrix()) {
+ val = new(mem_ctx) ir_dereference_array(val,
+ new(mem_ctx) ir_constant(row));
}
- return deref;
+ return val;
}
void
-ir_mat_op_to_vec_visitor::do_mul_mat_mat(ir_variable *result_var,
- ir_variable *a_var,
- ir_variable *b_var)
+ir_mat_op_to_vec_visitor::do_mul_mat_mat(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
{
int b_col, i;
ir_assignment *assign;
ir_expression *expr;
- for (b_col = 0; b_col < b_var->type->matrix_columns; b_col++) {
- ir_rvalue *a = get_column(a_var, 0);
- ir_rvalue *b = get_element(b_var, b_col, 0);
-
+ for (b_col = 0; b_col < b->type->matrix_columns; b_col++) {
/* first column */
expr = new(mem_ctx) ir_expression(ir_binop_mul,
- a->type,
- a,
- b);
+ get_column(a, 0),
+ get_element(b, b_col, 0));
/* following columns */
- for (i = 1; i < a_var->type->matrix_columns; i++) {
+ for (i = 1; i < a->type->matrix_columns; i++) {
ir_expression *mul_expr;
- a = get_column(a_var, i);
- b = get_element(b_var, b_col, i);
-
mul_expr = new(mem_ctx) ir_expression(ir_binop_mul,
- a->type,
- a,
- b);
+ get_column(a, i),
+ get_element(b, b_col, i));
expr = new(mem_ctx) ir_expression(ir_binop_add,
- a->type,
expr,
mul_expr);
}
- ir_rvalue *result = get_column(result_var, b_col);
- assign = new(mem_ctx) ir_assignment(result,
- expr,
- NULL);
+ assign = new(mem_ctx) ir_assignment(get_column(result, b_col), expr);
base_ir->insert_before(assign);
}
}
void
-ir_mat_op_to_vec_visitor::do_mul_mat_vec(ir_variable *result_var,
- ir_variable *a_var,
- ir_variable *b_var)
+ir_mat_op_to_vec_visitor::do_mul_mat_vec(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
{
int i;
- ir_rvalue *a = get_column(a_var, 0);
- ir_rvalue *b = get_element(b_var, 0, 0);
ir_assignment *assign;
ir_expression *expr;
/* first column */
expr = new(mem_ctx) ir_expression(ir_binop_mul,
- result_var->type,
- a,
- b);
+ get_column(a, 0),
+ get_element(b, 0, 0));
/* following columns */
- for (i = 1; i < a_var->type->matrix_columns; i++) {
+ for (i = 1; i < a->type->matrix_columns; i++) {
ir_expression *mul_expr;
- a = get_column(a_var, i);
- b = get_element(b_var, 0, i);
-
mul_expr = new(mem_ctx) ir_expression(ir_binop_mul,
- result_var->type,
- a,
- b);
- expr = new(mem_ctx) ir_expression(ir_binop_add,
- result_var->type,
- expr,
- mul_expr);
+ get_column(a, i),
+ get_element(b, 0, i));
+ expr = new(mem_ctx) ir_expression(ir_binop_add, expr, mul_expr);
}
- ir_rvalue *result = new(mem_ctx) ir_dereference_variable(result_var);
- assign = new(mem_ctx) ir_assignment(result,
- expr,
- NULL);
+ result = result->clone(mem_ctx, NULL);
+ assign = new(mem_ctx) ir_assignment(result, expr);
base_ir->insert_before(assign);
}
void
-ir_mat_op_to_vec_visitor::do_mul_vec_mat(ir_variable *result_var,
- ir_variable *a_var,
- ir_variable *b_var)
+ir_mat_op_to_vec_visitor::do_mul_vec_mat(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
{
int i;
- for (i = 0; i < b_var->type->matrix_columns; i++) {
- ir_rvalue *a = new(mem_ctx) ir_dereference_variable(a_var);
- ir_rvalue *b = get_column(b_var, i);
- ir_rvalue *result;
+ for (i = 0; i < b->type->matrix_columns; i++) {
+ ir_rvalue *column_result;
ir_expression *column_expr;
ir_assignment *column_assign;
- result = new(mem_ctx) ir_dereference_variable(result_var);
- result = new(mem_ctx) ir_swizzle(result, i, 0, 0, 0, 1);
+ column_result = result->clone(mem_ctx, NULL);
+ column_result = new(mem_ctx) ir_swizzle(column_result, i, 0, 0, 0, 1);
column_expr = new(mem_ctx) ir_expression(ir_binop_dot,
- result->type,
- a,
- b);
+ a->clone(mem_ctx, NULL),
+ get_column(b, i));
- column_assign = new(mem_ctx) ir_assignment(result,
- column_expr,
- NULL);
+ column_assign = new(mem_ctx) ir_assignment(column_result,
+ column_expr);
base_ir->insert_before(column_assign);
}
}
void
-ir_mat_op_to_vec_visitor::do_mul_mat_scalar(ir_variable *result_var,
- ir_variable *a_var,
- ir_variable *b_var)
+ir_mat_op_to_vec_visitor::do_mul_mat_scalar(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
{
int i;
- for (i = 0; i < a_var->type->matrix_columns; i++) {
- ir_rvalue *a = get_column(a_var, i);
- ir_rvalue *b = new(mem_ctx) ir_dereference_variable(b_var);
- ir_rvalue *result = get_column(result_var, i);
+ for (i = 0; i < a->type->matrix_columns; i++) {
ir_expression *column_expr;
ir_assignment *column_assign;
column_expr = new(mem_ctx) ir_expression(ir_binop_mul,
- result->type,
- a,
- b);
+ get_column(a, i),
+ b->clone(mem_ctx, NULL));
- column_assign = new(mem_ctx) ir_assignment(result,
- column_expr,
- NULL);
+ column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
+ column_expr);
base_ir->insert_before(column_assign);
}
}
void
-ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_variable *result_var,
- ir_variable *a_var,
- ir_variable *b_var,
+ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b,
bool test_equal)
{
/* This essentially implements the following GLSL:
@@ -293,7 +248,7 @@ ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_variable *result_var,
* a[3] != b[3]);
* }
*/
- const unsigned columns = a_var->type->matrix_columns;
+ const unsigned columns = a->type->matrix_columns;
const glsl_type *const bvec_type =
glsl_type::get_instance(GLSL_TYPE_BOOL, columns, 1);
@@ -303,12 +258,10 @@ ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_variable *result_var,
this->base_ir->insert_before(tmp_bvec);
for (unsigned i = 0; i < columns; i++) {
- ir_dereference *const op0 = get_column(a_var, i);
- ir_dereference *const op1 = get_column(b_var, i);
-
ir_expression *const cmp =
new(this->mem_ctx) ir_expression(ir_binop_any_nequal,
- glsl_type::bool_type, op0, op1);
+ get_column(a, i),
+ get_column(b, i));
ir_dereference *const lhs =
new(this->mem_ctx) ir_dereference_variable(tmp_bvec);
@@ -319,23 +272,14 @@ ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_variable *result_var,
this->base_ir->insert_before(assign);
}
- ir_rvalue *const val =
- new(this->mem_ctx) ir_dereference_variable(tmp_bvec);
-
- ir_expression *any =
- new(this->mem_ctx) ir_expression(ir_unop_any, glsl_type::bool_type,
- val, NULL);
+ ir_rvalue *const val = new(this->mem_ctx) ir_dereference_variable(tmp_bvec);
+ ir_expression *any = new(this->mem_ctx) ir_expression(ir_unop_any, val);
if (test_equal)
- any = new(this->mem_ctx) ir_expression(ir_unop_logic_not,
- glsl_type::bool_type,
- any, NULL);
-
- ir_rvalue *const result =
- new(this->mem_ctx) ir_dereference_variable(result_var);
+ any = new(this->mem_ctx) ir_expression(ir_unop_logic_not, any);
ir_assignment *const assign =
- new(mem_ctx) ir_assignment(result, any, NULL);
+ new(mem_ctx) ir_assignment(result->clone(mem_ctx, NULL), any);
base_ir->insert_before(assign);
}
@@ -358,7 +302,7 @@ ir_mat_op_to_vec_visitor::visit_leave(ir_assignment *orig_assign)
{
ir_expression *orig_expr = orig_assign->rhs->as_expression();
unsigned int i, matrix_columns = 1;
- ir_variable *op_var[2];
+ ir_dereference *op[2];
if (!orig_expr)
return visit_continue;
@@ -370,51 +314,53 @@ ir_mat_op_to_vec_visitor::visit_leave(ir_assignment *orig_assign)
mem_ctx = ralloc_parent(orig_assign);
- ir_dereference_variable *lhs_deref =
+ ir_dereference_variable *result =
orig_assign->lhs->as_dereference_variable();
- assert(lhs_deref);
-
- ir_variable *result_var = lhs_deref->var;
+ assert(result);
/* Store the expression operands in temps so we can use them
* multiple times.
*/
for (i = 0; i < orig_expr->get_num_operands(); i++) {
ir_assignment *assign;
+ ir_dereference *deref = orig_expr->operands[i]->as_dereference();
- op_var[i] = new(mem_ctx) ir_variable(orig_expr->operands[i]->type,
- "mat_op_to_vec",
- ir_var_temporary);
- base_ir->insert_before(op_var[i]);
+ /* Avoid making a temporary if we don't need to to avoid aliasing. */
+ if (deref &&
+ deref->variable_referenced() != result->variable_referenced()) {
+ op[i] = deref;
+ continue;
+ }
- lhs_deref = new(mem_ctx) ir_dereference_variable(op_var[i]);
- assign = new(mem_ctx) ir_assignment(lhs_deref,
- orig_expr->operands[i],
- NULL);
+ /* Otherwise, store the operand in a temporary generally if it's
+ * not a dereference.
+ */
+ ir_variable *var = new(mem_ctx) ir_variable(orig_expr->operands[i]->type,
+ "mat_op_to_vec",
+ ir_var_temporary);
+ base_ir->insert_before(var);
+
+ /* Note that we use this dereference for the assignment. That means
+ * that others that want to use op[i] have to clone the deref.
+ */
+ op[i] = new(mem_ctx) ir_dereference_variable(var);
+ assign = new(mem_ctx) ir_assignment(op[i], orig_expr->operands[i]);
base_ir->insert_before(assign);
}
/* OK, time to break down this matrix operation. */
switch (orig_expr->operation) {
case ir_unop_neg: {
- const unsigned mask = (1U << result_var->type->vector_elements) - 1;
-
/* Apply the operation to each column.*/
for (i = 0; i < matrix_columns; i++) {
- ir_rvalue *op0 = get_column(op_var[0], i);
- ir_dereference *result = get_column(result_var, i);
ir_expression *column_expr;
ir_assignment *column_assign;
column_expr = new(mem_ctx) ir_expression(orig_expr->operation,
- result->type,
- op0,
- NULL);
-
- column_assign = new(mem_ctx) ir_assignment(result,
- column_expr,
- NULL,
- mask);
+ get_column(op[0], i));
+
+ column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
+ column_expr);
assert(column_assign->write_mask != 0);
base_ir->insert_before(column_assign);
}
@@ -424,57 +370,49 @@ ir_mat_op_to_vec_visitor::visit_leave(ir_assignment *orig_assign)
case ir_binop_sub:
case ir_binop_div:
case ir_binop_mod: {
- const unsigned mask = (1U << result_var->type->vector_elements) - 1;
-
/* For most operations, the matrix version is just going
* column-wise through and applying the operation to each column
* if available.
*/
for (i = 0; i < matrix_columns; i++) {
- ir_rvalue *op0 = get_column(op_var[0], i);
- ir_rvalue *op1 = get_column(op_var[1], i);
- ir_dereference *result = get_column(result_var, i);
ir_expression *column_expr;
ir_assignment *column_assign;
column_expr = new(mem_ctx) ir_expression(orig_expr->operation,
- result->type,
- op0,
- op1);
-
- column_assign = new(mem_ctx) ir_assignment(result,
- column_expr,
- NULL,
- mask);
+ get_column(op[0], i),
+ get_column(op[1], i));
+
+ column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
+ column_expr);
assert(column_assign->write_mask != 0);
base_ir->insert_before(column_assign);
}
break;
}
case ir_binop_mul:
- if (op_var[0]->type->is_matrix()) {
- if (op_var[1]->type->is_matrix()) {
- do_mul_mat_mat(result_var, op_var[0], op_var[1]);
- } else if (op_var[1]->type->is_vector()) {
- do_mul_mat_vec(result_var, op_var[0], op_var[1]);
+ if (op[0]->type->is_matrix()) {
+ if (op[1]->type->is_matrix()) {
+ do_mul_mat_mat(result, op[0], op[1]);
+ } else if (op[1]->type->is_vector()) {
+ do_mul_mat_vec(result, op[0], op[1]);
} else {
- assert(op_var[1]->type->is_scalar());
- do_mul_mat_scalar(result_var, op_var[0], op_var[1]);
+ assert(op[1]->type->is_scalar());
+ do_mul_mat_scalar(result, op[0], op[1]);
}
} else {
- assert(op_var[1]->type->is_matrix());
- if (op_var[0]->type->is_vector()) {
- do_mul_vec_mat(result_var, op_var[0], op_var[1]);
+ assert(op[1]->type->is_matrix());
+ if (op[0]->type->is_vector()) {
+ do_mul_vec_mat(result, op[0], op[1]);
} else {
- assert(op_var[0]->type->is_scalar());
- do_mul_mat_scalar(result_var, op_var[1], op_var[0]);
+ assert(op[0]->type->is_scalar());
+ do_mul_mat_scalar(result, op[1], op[0]);
}
}
break;
case ir_binop_all_equal:
case ir_binop_any_nequal:
- do_equal_mat_mat(result_var, op_var[1], op_var[0],
+ do_equal_mat_mat(result, op[1], op[0],
(orig_expr->operation == ir_binop_all_equal));
break;