author     marha <marha@users.sourceforge.net>  2012-04-30 11:11:16 +0200
committer  marha <marha@users.sourceforge.net>  2012-04-30 11:11:16 +0200
commit     0f0386d4e90cb0769eddc04b18742d5a9a72ea05 (patch)
tree       a8a5fb11e78c4afe6637c5adfa0d2224208f7314 /mesalib/src
parent     cd27f58626705bcb561115b8e5b0d1430df83fa6 (diff)
parent     762b7fde3d57d3a151f98535fd31516b7e823bc0 (diff)
download   vcxsrv-0f0386d4e90cb0769eddc04b18742d5a9a72ea05.tar.gz
           vcxsrv-0f0386d4e90cb0769eddc04b18742d5a9a72ea05.tar.bz2
           vcxsrv-0f0386d4e90cb0769eddc04b18742d5a9a72ea05.zip
Merge remote-tracking branch 'origin/released'
Conflicts:
	pixman/pixman/pixman-mmx.c
	xorg-server/glx/glapi_gentable.c
Diffstat (limited to 'mesalib/src')
-rw-r--r--  mesalib/src/gallium/auxiliary/util/u_blitter.c     |   1
-rw-r--r--  mesalib/src/gallium/auxiliary/util/u_draw_quad.c   | 269
-rw-r--r--  mesalib/src/gallium/auxiliary/util/u_vbuf.c        | 728
-rw-r--r--  mesalib/src/gallium/auxiliary/util/u_vbuf.h        | 105
-rw-r--r--  mesalib/src/glsl/Makefile                          |   2
-rw-r--r--  mesalib/src/glsl/ir_reader.cpp                     |   2
-rw-r--r--  mesalib/src/glsl/main.cpp                          |   9
-rw-r--r--  mesalib/src/glsl/opt_dead_functions.cpp            |   2
-rw-r--r--  mesalib/src/glsl/opt_structure_splitting.cpp       |   1
-rw-r--r--  mesalib/src/mesa/main/version.c                    |   1
-rw-r--r--  mesalib/src/mesa/state_tracker/st_cb_drawpixels.c  |  10
-rw-r--r--  mesalib/src/mesa/state_tracker/st_context.c        |  32
-rw-r--r--  mesalib/src/mesa/state_tracker/st_context.h        |   3
-rw-r--r--  mesalib/src/mesa/state_tracker/st_draw.c           |  16
-rw-r--r--  mesalib/src/mesa/state_tracker/st_extensions.c     |   8
-rw-r--r--  mesalib/src/mesa/vbo/vbo_exec.h                    |   3
-rw-r--r--  mesalib/src/mesa/vbo/vbo_exec_array.c              |  38
-rw-r--r--  mesalib/src/mesa/vbo/vbo_exec_draw.c               |   2
-rw-r--r--  mesalib/src/mesa/vbo/vbo_save_draw.c               |   2
19 files changed, 602 insertions, 632 deletions
diff --git a/mesalib/src/gallium/auxiliary/util/u_blitter.c b/mesalib/src/gallium/auxiliary/util/u_blitter.c
index 6a9141209..d0b9187d1 100644
--- a/mesalib/src/gallium/auxiliary/util/u_blitter.c
+++ b/mesalib/src/gallium/auxiliary/util/u_blitter.c
@@ -733,6 +733,7 @@ static void blitter_draw(struct blitter_context_priv *ctx,
u_upload_unmap(ctx->upload);
util_draw_vertex_buffer(ctx->base.pipe, NULL, buf, offset,
PIPE_PRIM_TRIANGLE_FAN, 4, 2);
+ pipe_resource_reference(&buf, NULL);
}
static void blitter_draw_rectangle(struct blitter_context *blitter,
diff --git a/mesalib/src/gallium/auxiliary/util/u_draw_quad.c b/mesalib/src/gallium/auxiliary/util/u_draw_quad.c
index 371fefb9e..590fa0c36 100644
--- a/mesalib/src/gallium/auxiliary/util/u_draw_quad.c
+++ b/mesalib/src/gallium/auxiliary/util/u_draw_quad.c
@@ -1,135 +1,134 @@
-/**************************************************************************
- *
- * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "pipe/p_context.h"
-#include "pipe/p_defines.h"
-#include "util/u_inlines.h"
-#include "util/u_draw_quad.h"
-#include "util/u_memory.h"
-#include "cso_cache/cso_context.h"
-
-
-/**
- * Draw a simple vertex buffer / primitive.
- * Limited to float[4] vertex attribs, tightly packed.
- */
-void
-util_draw_vertex_buffer(struct pipe_context *pipe,
- struct cso_context *cso,
- struct pipe_resource *vbuf,
- uint offset,
- uint prim_type,
- uint num_verts,
- uint num_attribs)
-{
- struct pipe_vertex_buffer vbuffer;
-
- assert(num_attribs <= PIPE_MAX_ATTRIBS);
-
- /* tell pipe about the vertex buffer */
- memset(&vbuffer, 0, sizeof(vbuffer));
- vbuffer.buffer = vbuf;
- vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
- vbuffer.buffer_offset = offset;
-
- if (cso) {
- cso_set_vertex_buffers(cso, 1, &vbuffer);
- } else {
- pipe->set_vertex_buffers(pipe, 1, &vbuffer);
- }
-
- /* note: vertex elements already set by caller */
-
- /* draw */
- util_draw_arrays(pipe, prim_type, 0, num_verts);
-}
-
-
-
-/**
- * Draw screen-aligned textured quad.
- * Note: this isn't especially efficient.
- */
-void
-util_draw_texquad(struct pipe_context *pipe, struct cso_context *cso,
- float x0, float y0, float x1, float y1, float z)
-{
- uint numAttribs = 2, i, j;
- uint vertexBytes = 4 * (4 * numAttribs * sizeof(float));
- struct pipe_resource *vbuf = NULL;
- float *v = NULL;
-
- v = MALLOC(vertexBytes);
- if (v == NULL)
- goto out;
-
- /*
- * Load vertex buffer
- */
- for (i = j = 0; i < 4; i++) {
- v[j + 2] = z; /* z */
- v[j + 3] = 1.0; /* w */
- v[j + 6] = 0.0; /* r */
- v[j + 7] = 1.0; /* q */
- j += 8;
- }
-
- v[0] = x0;
- v[1] = y0;
- v[4] = 0.0; /*s*/
- v[5] = 0.0; /*t*/
-
- v[8] = x1;
- v[9] = y0;
- v[12] = 1.0;
- v[13] = 0.0;
-
- v[16] = x1;
- v[17] = y1;
- v[20] = 1.0;
- v[21] = 1.0;
-
- v[24] = x0;
- v[25] = y1;
- v[28] = 0.0;
- v[29] = 1.0;
-
- vbuf = pipe_user_buffer_create(pipe->screen, v, vertexBytes,
- PIPE_BIND_VERTEX_BUFFER);
- if (!vbuf)
- goto out;
-
- util_draw_vertex_buffer(pipe, cso, vbuf, 0, PIPE_PRIM_TRIANGLE_FAN, 4, 2);
-
-out:
- if (vbuf)
- pipe_resource_reference(&vbuf, NULL);
-
- if (v)
- FREE(v);
-}
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_inlines.h"
+#include "util/u_draw_quad.h"
+#include "util/u_memory.h"
+#include "cso_cache/cso_context.h"
+
+
+/**
+ * Draw a simple vertex buffer / primitive.
+ * Limited to float[4] vertex attribs, tightly packed.
+ */
+void
+util_draw_vertex_buffer(struct pipe_context *pipe,
+ struct cso_context *cso,
+ struct pipe_resource *vbuf,
+ uint offset,
+ uint prim_type,
+ uint num_verts,
+ uint num_attribs)
+{
+ struct pipe_vertex_buffer vbuffer;
+
+ assert(num_attribs <= PIPE_MAX_ATTRIBS);
+
+ /* tell pipe about the vertex buffer */
+ memset(&vbuffer, 0, sizeof(vbuffer));
+ vbuffer.buffer = vbuf;
+ vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
+ vbuffer.buffer_offset = offset;
+
+ /* note: vertex elements already set by caller */
+
+ if (cso) {
+ cso_set_vertex_buffers(cso, 1, &vbuffer);
+ cso_draw_arrays(cso, prim_type, 0, num_verts);
+ } else {
+ pipe->set_vertex_buffers(pipe, 1, &vbuffer);
+ util_draw_arrays(pipe, prim_type, 0, num_verts);
+ }
+}
+
+
+
+/**
+ * Draw screen-aligned textured quad.
+ * Note: this isn't especially efficient.
+ */
+void
+util_draw_texquad(struct pipe_context *pipe, struct cso_context *cso,
+ float x0, float y0, float x1, float y1, float z)
+{
+ uint numAttribs = 2, i, j;
+ uint vertexBytes = 4 * (4 * numAttribs * sizeof(float));
+ struct pipe_resource *vbuf = NULL;
+ float *v = NULL;
+
+ v = MALLOC(vertexBytes);
+ if (v == NULL)
+ goto out;
+
+ /*
+ * Load vertex buffer
+ */
+ for (i = j = 0; i < 4; i++) {
+ v[j + 2] = z; /* z */
+ v[j + 3] = 1.0; /* w */
+ v[j + 6] = 0.0; /* r */
+ v[j + 7] = 1.0; /* q */
+ j += 8;
+ }
+
+ v[0] = x0;
+ v[1] = y0;
+ v[4] = 0.0; /*s*/
+ v[5] = 0.0; /*t*/
+
+ v[8] = x1;
+ v[9] = y0;
+ v[12] = 1.0;
+ v[13] = 0.0;
+
+ v[16] = x1;
+ v[17] = y1;
+ v[20] = 1.0;
+ v[21] = 1.0;
+
+ v[24] = x0;
+ v[25] = y1;
+ v[28] = 0.0;
+ v[29] = 1.0;
+
+ vbuf = pipe_user_buffer_create(pipe->screen, v, vertexBytes,
+ PIPE_BIND_VERTEX_BUFFER);
+ if (!vbuf)
+ goto out;
+
+ util_draw_vertex_buffer(pipe, cso, vbuf, 0, PIPE_PRIM_TRIANGLE_FAN, 4, 2);
+
+out:
+ if (vbuf)
+ pipe_resource_reference(&vbuf, NULL);
+
+ if (v)
+ FREE(v);
+}
diff --git a/mesalib/src/gallium/auxiliary/util/u_vbuf.c b/mesalib/src/gallium/auxiliary/util/u_vbuf.c
index 50c35afd5..401c8a4a8 100644
--- a/mesalib/src/gallium/auxiliary/util/u_vbuf.c
+++ b/mesalib/src/gallium/auxiliary/util/u_vbuf.c
@@ -52,9 +52,23 @@ struct u_vbuf_elements {
/* This might mean two things:
* - src_format != native_format, as discussed above.
* - src_offset % 4 != 0 (if the caps don't allow such an offset). */
- boolean incompatible_layout;
- /* Per-element flags. */
- boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
+ uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */
+ /* Which buffer has at least one vertex element referencing it
+ * incompatible. */
+ uint32_t incompatible_vb_mask_any;
+ /* Which buffer has all vertex elements referencing it incompatible. */
+ uint32_t incompatible_vb_mask_all;
+ /* Which buffer has at least one vertex element referencing it
+ * compatible. */
+ uint32_t compatible_vb_mask_any;
+ /* Which buffer has all vertex elements referencing it compatible. */
+ uint32_t compatible_vb_mask_all;
+
+ /* Which buffer has at least one vertex element referencing it
+ * non-instanced. */
+ uint32_t noninstance_vb_mask_any;
+
+ void *driver_cso;
};
enum {
@@ -64,101 +78,128 @@ enum {
VB_NUM = 3
};
-struct u_vbuf_priv {
- struct u_vbuf b;
+struct u_vbuf {
+ struct u_vbuf_caps caps;
+
struct pipe_context *pipe;
struct translate_cache *translate_cache;
struct cso_cache *cso_cache;
+ struct u_upload_mgr *uploader;
- /* Vertex element state bound by the state tracker. */
- void *saved_ve;
- /* and its associated helper structure for this module. */
- struct u_vbuf_elements *ve;
+ /* This is what was set in set_vertex_buffers.
+ * May contain user buffers. */
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+ unsigned nr_vertex_buffers;
+
+ /* Saved vertex buffers. */
+ struct pipe_vertex_buffer vertex_buffer_saved[PIPE_MAX_ATTRIBS];
+ unsigned nr_vertex_buffers_saved;
+
+ /* Vertex buffers for the driver.
+ * There are no user buffers. */
+ struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
+ int nr_real_vertex_buffers;
+ boolean vertex_buffers_dirty;
+
+ /* The index buffer. */
+ struct pipe_index_buffer index_buffer;
+
+ /* Vertex elements. */
+ struct u_vbuf_elements *ve, *ve_saved;
/* Vertex elements used for the translate fallback. */
struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
/* If non-NULL, this is a vertex element state used for the translate
* fallback and therefore used for rendering too. */
- void *fallback_ve;
+ boolean using_translate;
/* The vertex buffer slot index where translated vertices have been
* stored in. */
unsigned fallback_vbs[VB_NUM];
- /* When binding the fallback vertex element state, we don't want to
- * change saved_ve and ve. This is set to TRUE in such cases. */
- boolean ve_binding_lock;
-
- /* Whether there is any user buffer. */
- boolean any_user_vbs;
- /* Whether there is a buffer with a non-native layout. */
- boolean incompatible_vb_layout;
- /* Per-buffer flags. */
- boolean incompatible_vb[PIPE_MAX_ATTRIBS];
+
+ /* Which buffer is a user buffer. */
+ uint32_t user_vb_mask; /* each bit describes a corresp. buffer */
+ /* Which buffer is incompatible (unaligned). */
+ uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */
+ /* Which buffer has a non-zero stride. */
+ uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */
};
-static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
-{
- struct pipe_screen *screen = mgr->pipe->screen;
+static void *
+u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
+ const struct pipe_vertex_element *attribs);
+static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso);
- mgr->b.caps.format_fixed32 =
+
+void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps)
+{
+ caps->format_fixed32 =
screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER);
- mgr->b.caps.format_float16 =
+ caps->format_float16 =
screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER);
- mgr->b.caps.format_float64 =
+ caps->format_float64 =
screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER);
- mgr->b.caps.format_norm32 =
+ caps->format_norm32 =
screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER) &&
screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER);
- mgr->b.caps.format_scaled32 =
+ caps->format_scaled32 =
screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER) &&
screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
0, PIPE_BIND_VERTEX_BUFFER);
+
+ caps->buffer_offset_unaligned =
+ !screen->get_param(screen,
+ PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY);
+
+ caps->buffer_stride_unaligned =
+ !screen->get_param(screen,
+ PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY);
+
+ caps->velem_src_offset_unaligned =
+ !screen->get_param(screen,
+ PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY);
+
+ caps->user_vertex_buffers =
+ screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS);
}
struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
- unsigned upload_buffer_size,
- unsigned upload_buffer_alignment,
- unsigned upload_buffer_bind,
- enum u_fetch_alignment fetch_alignment)
+ struct u_vbuf_caps *caps)
{
- struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);
+ struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);
+ mgr->caps = *caps;
mgr->pipe = pipe;
mgr->cso_cache = cso_cache_create();
mgr->translate_cache = translate_cache_create();
memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));
- mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
- upload_buffer_alignment,
- upload_buffer_bind);
-
- mgr->b.caps.fetch_dword_unaligned =
- fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;
-
- u_vbuf_init_format_caps(mgr);
+ mgr->uploader = u_upload_create(pipe, 1024 * 1024, 4,
+ PIPE_BIND_VERTEX_BUFFER);
- return &mgr->b;
+ return mgr;
}
-/* XXX I had to fork this off of cso_context. */
-static void *
-u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr,
- unsigned count,
- const struct pipe_vertex_element *states)
+/* u_vbuf uses its own caching for vertex elements, because it needs to keep
+ * its own preprocessed state per vertex element CSO. */
+static struct u_vbuf_elements *
+u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count,
+ const struct pipe_vertex_element *states)
{
+ struct pipe_context *pipe = mgr->pipe;
unsigned key_size, hash_key;
struct cso_hash_iter iter;
- void *handle;
+ struct u_vbuf_elements *ve;
struct cso_velems_state velems_state;
/* need to include the count into the stored state data too. */
@@ -173,47 +214,52 @@ u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr,
if (cso_hash_iter_is_null(iter)) {
struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
memcpy(&cso->state, &velems_state, key_size);
- cso->data =
- mgr->pipe->create_vertex_elements_state(mgr->pipe, count,
- &cso->state.velems[0]);
- cso->delete_state =
- (cso_state_callback)mgr->pipe->delete_vertex_elements_state;
- cso->context = mgr->pipe;
+ cso->data = u_vbuf_create_vertex_elements(mgr, count, states);
+ cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
+ cso->context = (void*)mgr;
iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
- handle = cso->data;
+ ve = cso->data;
} else {
- handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
+ ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
}
- mgr->pipe->bind_vertex_elements_state(mgr->pipe, handle);
- return handle;
+ assert(ve);
+ pipe->bind_vertex_elements_state(pipe, ve->driver_cso);
+ return ve;
+}
+
+void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
+ const struct pipe_vertex_element *states)
+{
+ mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states);
}
-void u_vbuf_destroy(struct u_vbuf *mgrb)
+void u_vbuf_destroy(struct u_vbuf *mgr)
{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
unsigned i;
- for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
- pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
+ mgr->pipe->set_vertex_buffers(mgr->pipe, 0, NULL);
+
+ for (i = 0; i < mgr->nr_vertex_buffers; i++) {
+ pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
}
- for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+ for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
+ pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
}
translate_cache_destroy(mgr->translate_cache);
- u_upload_destroy(mgr->b.uploader);
+ u_upload_destroy(mgr->uploader);
cso_cache_delete(mgr->cso_cache);
FREE(mgr);
}
static void
-u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
+u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
unsigned vb_mask, unsigned out_vb,
int start_vertex, unsigned num_vertices,
int start_index, unsigned num_indices, int min_index,
- bool unroll_indices)
+ boolean unroll_indices)
{
struct translate *tr;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
@@ -225,14 +271,14 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
tr = translate_cache_find(mgr->translate_cache, key);
/* Map buffers we want to translate. */
- for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
+ for (i = 0; i < mgr->nr_vertex_buffers; i++) {
if (vb_mask & (1 << i)) {
- struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];
+ struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[i];
unsigned offset = vb->buffer_offset + vb->stride * start_vertex;
uint8_t *map;
- if (u_vbuf_resource(vb->buffer)->user_ptr) {
- map = u_vbuf_resource(vb->buffer)->user_ptr + offset;
+ if (vb->buffer->user_ptr) {
+ map = vb->buffer->user_ptr + offset;
} else {
unsigned size = vb->stride ? num_vertices * vb->stride
: sizeof(double)*4;
@@ -256,15 +302,15 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
/* Translate. */
if (unroll_indices) {
- struct pipe_index_buffer *ib = &mgr->b.index_buffer;
+ struct pipe_index_buffer *ib = &mgr->index_buffer;
struct pipe_transfer *transfer = NULL;
unsigned offset = ib->offset + start_index * ib->index_size;
uint8_t *map;
assert(ib->buffer && ib->index_size);
- if (u_vbuf_resource(ib->buffer)->user_ptr) {
- map = u_vbuf_resource(ib->buffer)->user_ptr + offset;
+ if (ib->buffer->user_ptr) {
+ map = ib->buffer->user_ptr + offset;
} else {
map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
num_indices * ib->index_size,
@@ -272,7 +318,7 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
}
/* Create and map the output buffer. */
- u_upload_alloc(mgr->b.uploader, 0,
+ u_upload_alloc(mgr->uploader, 0,
key->output_stride * num_indices,
&out_offset, &out_buffer,
(void**)&out_map);
@@ -294,7 +340,7 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
}
} else {
/* Create and map the output buffer. */
- u_upload_alloc(mgr->b.uploader,
+ u_upload_alloc(mgr->uploader,
key->output_stride * start_vertex,
key->output_stride * num_vertices,
&out_offset, &out_buffer,
@@ -306,64 +352,52 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
}
/* Unmap all buffers. */
- for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
+ for (i = 0; i < mgr->nr_vertex_buffers; i++) {
if (vb_transfer[i]) {
pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
}
}
/* Setup the new vertex buffer. */
- mgr->b.real_vertex_buffer[out_vb].buffer_offset = out_offset;
- mgr->b.real_vertex_buffer[out_vb].stride = key->output_stride;
+ mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
+ mgr->real_vertex_buffer[out_vb].stride = key->output_stride;
/* Move the buffer reference. */
pipe_resource_reference(
- &mgr->b.real_vertex_buffer[out_vb].buffer, NULL);
- mgr->b.real_vertex_buffer[out_vb].buffer = out_buffer;
+ &mgr->real_vertex_buffer[out_vb].buffer, NULL);
+ mgr->real_vertex_buffer[out_vb].buffer = out_buffer;
}
static boolean
-u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
+u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
unsigned mask[VB_NUM])
{
- unsigned i, type;
- unsigned nr = mgr->ve->count;
- boolean used_vb[PIPE_MAX_ATTRIBS] = {0};
+ unsigned type;
unsigned fallback_vbs[VB_NUM];
+ /* Set the bit for each buffer which is incompatible, or isn't set. */
+ uint32_t unused_vb_mask =
+ mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask |
+ ~((1 << mgr->nr_vertex_buffers) - 1);
memset(fallback_vbs, ~0, sizeof(fallback_vbs));
- /* Mark used vertex buffers as... used. */
- for (i = 0; i < nr; i++) {
- if (!mgr->ve->incompatible_layout_elem[i]) {
- unsigned index = mgr->ve->ve[i].vertex_buffer_index;
-
- if (!mgr->incompatible_vb[index]) {
- used_vb[index] = TRUE;
- }
- }
- }
-
/* Find free slots for each type if needed. */
- i = 0;
for (type = 0; type < VB_NUM; type++) {
if (mask[type]) {
- for (; i < PIPE_MAX_ATTRIBS; i++) {
- if (!used_vb[i]) {
- /*printf("found slot=%i for type=%i\n", i, type);*/
- fallback_vbs[type] = i;
- i++;
- if (i > mgr->b.nr_real_vertex_buffers) {
- mgr->b.nr_real_vertex_buffers = i;
- }
- break;
- }
- }
- if (i == PIPE_MAX_ATTRIBS) {
+ uint32_t index;
+
+ if (!unused_vb_mask) {
/* fail, reset the number to its original value */
- mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
+ mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers;
return FALSE;
}
+
+ index = ffs(unused_vb_mask) - 1;
+ fallback_vbs[type] = index;
+ if (index >= mgr->nr_real_vertex_buffers) {
+ mgr->nr_real_vertex_buffers = index + 1;
+ }
+ /*printf("found slot=%i for type=%i\n", index, type);*/
}
}
@@ -372,11 +406,11 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
}
static boolean
-u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
+u_vbuf_translate_begin(struct u_vbuf *mgr,
int start_vertex, unsigned num_vertices,
int start_instance, unsigned num_instances,
int start_index, unsigned num_indices, int min_index,
- bool unroll_indices)
+ boolean unroll_indices)
{
unsigned mask[VB_NUM] = {0};
struct translate_key key[VB_NUM];
@@ -403,22 +437,22 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
for (i = 0; i < mgr->ve->count; i++) {
unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;
- if (!mgr->b.vertex_buffer[vb_index].stride) {
- if (!mgr->ve->incompatible_layout_elem[i] &&
- !mgr->incompatible_vb[vb_index]) {
+ if (!mgr->vertex_buffer[vb_index].stride) {
+ if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+ !(mgr->incompatible_vb_mask & (1 << vb_index))) {
continue;
}
mask[VB_CONST] |= 1 << vb_index;
} else if (mgr->ve->ve[i].instance_divisor) {
- if (!mgr->ve->incompatible_layout_elem[i] &&
- !mgr->incompatible_vb[vb_index]) {
+ if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+ !(mgr->incompatible_vb_mask & (1 << vb_index))) {
continue;
}
mask[VB_INSTANCE] |= 1 << vb_index;
} else {
if (!unroll_indices &&
- !mgr->ve->incompatible_layout_elem[i] &&
- !mgr->incompatible_vb[vb_index]) {
+ !(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+ !(mgr->incompatible_vb_mask & (1 << vb_index))) {
continue;
}
mask[VB_VERTEX] |= 1 << vb_index;
@@ -439,8 +473,8 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
bit = 1 << vb_index;
- if (!mgr->ve->incompatible_layout_elem[i] &&
- !mgr->incompatible_vb[vb_index] &&
+ if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+ !(mgr->incompatible_vb_mask & (1 << vb_index)) &&
(!unroll_indices || !(mask[VB_VERTEX] & bit))) {
continue;
}
@@ -484,7 +518,7 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
/* Fixup the stride for constant attribs. */
if (type == VB_CONST) {
- mgr->b.real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
+ mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
}
}
}
@@ -512,55 +546,48 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
}
}
- /* Preserve saved_ve. */
- mgr->ve_binding_lock = TRUE;
- mgr->fallback_ve = u_vbuf_pipe_set_vertex_elements(mgr, mgr->ve->count,
- mgr->fallback_velems);
- mgr->ve_binding_lock = FALSE;
+ u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count,
+ mgr->fallback_velems);
+ mgr->using_translate = TRUE;
return TRUE;
}
-static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
+static void u_vbuf_translate_end(struct u_vbuf *mgr)
{
unsigned i;
/* Restore vertex elements. */
- /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
- mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
- mgr->fallback_ve = NULL;
+ mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
+ mgr->using_translate = FALSE;
/* Unreference the now-unused VBOs. */
for (i = 0; i < VB_NUM; i++) {
unsigned vb = mgr->fallback_vbs[i];
if (vb != ~0) {
- pipe_resource_reference(&mgr->b.real_vertex_buffer[vb].buffer, NULL);
+ pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL);
mgr->fallback_vbs[i] = ~0;
}
}
- mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
+ mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers;
}
#define FORMAT_REPLACE(what, withwhat) \
case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break
-struct u_vbuf_elements *
-u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
- unsigned count,
- const struct pipe_vertex_element *attribs,
- struct pipe_vertex_element *native_attribs)
+static void *
+u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
+ const struct pipe_vertex_element *attribs)
{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
+ struct pipe_context *pipe = mgr->pipe;
unsigned i;
+ struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS];
struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);
+ uint32_t used_buffers = 0;
ve->count = count;
- if (!count) {
- return ve;
- }
-
memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
- memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);
+ memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count);
/* Set the best native format in case the original format is not
* supported. */
@@ -569,10 +596,16 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
ve->src_format_size[i] = util_format_get_blocksize(format);
+ used_buffers |= 1 << ve->ve[i].vertex_buffer_index;
+
+ if (!ve->ve[i].instance_divisor) {
+ ve->noninstance_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
+ }
+
/* Choose a native format.
* For now we don't care about the alignment, that's going to
* be sorted out later. */
- if (!mgr->b.caps.format_fixed32) {
+ if (!mgr->caps.format_fixed32) {
switch (format) {
FORMAT_REPLACE(R32_FIXED, R32_FLOAT);
FORMAT_REPLACE(R32G32_FIXED, R32G32_FLOAT);
@@ -581,7 +614,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
default:;
}
}
- if (!mgr->b.caps.format_float16) {
+ if (!mgr->caps.format_float16) {
switch (format) {
FORMAT_REPLACE(R16_FLOAT, R32_FLOAT);
FORMAT_REPLACE(R16G16_FLOAT, R32G32_FLOAT);
@@ -590,7 +623,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
default:;
}
}
- if (!mgr->b.caps.format_float64) {
+ if (!mgr->caps.format_float64) {
switch (format) {
FORMAT_REPLACE(R64_FLOAT, R32_FLOAT);
FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT);
@@ -599,7 +632,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
default:;
}
}
- if (!mgr->b.caps.format_norm32) {
+ if (!mgr->caps.format_norm32) {
switch (format) {
FORMAT_REPLACE(R32_UNORM, R32_FLOAT);
FORMAT_REPLACE(R32G32_UNORM, R32G32_FLOAT);
@@ -612,7 +645,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
default:;
}
}
- if (!mgr->b.caps.format_scaled32) {
+ if (!mgr->caps.format_scaled32) {
switch (format) {
FORMAT_REPLACE(R32_USCALED, R32_FLOAT);
FORMAT_REPLACE(R32G32_USCALED, R32G32_FLOAT);
@@ -626,115 +659,106 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
}
}
- native_attribs[i].src_format = format;
+ driver_attribs[i].src_format = format;
ve->native_format[i] = format;
ve->native_format_size[i] =
util_format_get_blocksize(ve->native_format[i]);
- ve->incompatible_layout_elem[i] =
- ve->ve[i].src_format != ve->native_format[i] ||
- (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
- ve->incompatible_layout =
- ve->incompatible_layout ||
- ve->incompatible_layout_elem[i];
+ if (ve->ve[i].src_format != format ||
+ (!mgr->caps.velem_src_offset_unaligned &&
+ ve->ve[i].src_offset % 4 != 0)) {
+ ve->incompatible_elem_mask |= 1 << i;
+ ve->incompatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
+ } else {
+ ve->compatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
+ }
}
+ ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers;
+ ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers;
+
/* Align the formats to the size of DWORD if needed. */
- if (!mgr->b.caps.fetch_dword_unaligned) {
+ if (!mgr->caps.velem_src_offset_unaligned) {
for (i = 0; i < count; i++) {
ve->native_format_size[i] = align(ve->native_format_size[i], 4);
}
}
+ ve->driver_cso =
+ pipe->create_vertex_elements_state(pipe, count, driver_attribs);
return ve;
}
-void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
- void *cso,
- struct u_vbuf_elements *ve)
+static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso)
{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
-
- if (!cso) {
- return;
- }
-
- if (!mgr->ve_binding_lock) {
- mgr->saved_ve = cso;
- mgr->ve = ve;
- }
-}
+ struct pipe_context *pipe = mgr->pipe;
+ struct u_vbuf_elements *ve = cso;
-void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
- struct u_vbuf_elements *ve)
-{
+ pipe->delete_vertex_elements_state(pipe, ve->driver_cso);
FREE(ve);
}
-void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
- unsigned count,
+void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, unsigned count,
const struct pipe_vertex_buffer *bufs)
{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
unsigned i;
- mgr->any_user_vbs = FALSE;
- mgr->incompatible_vb_layout = FALSE;
- memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));
-
- if (!mgr->b.caps.fetch_dword_unaligned) {
- /* Check if the strides and offsets are aligned to the size of DWORD. */
- for (i = 0; i < count; i++) {
- if (bufs[i].buffer) {
- if (bufs[i].stride % 4 != 0 ||
- bufs[i].buffer_offset % 4 != 0) {
- mgr->incompatible_vb_layout = TRUE;
- mgr->incompatible_vb[i] = TRUE;
- }
- }
- }
- }
+ mgr->user_vb_mask = 0;
+ mgr->incompatible_vb_mask = 0;
+ mgr->nonzero_stride_vb_mask = 0;
for (i = 0; i < count; i++) {
const struct pipe_vertex_buffer *vb = &bufs[i];
+ struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[i];
+ struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[i];
- pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
+ pipe_resource_reference(&orig_vb->buffer, vb->buffer);
- mgr->b.real_vertex_buffer[i].buffer_offset =
- mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;
+ real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset;
+ real_vb->stride = orig_vb->stride = vb->stride;
- mgr->b.real_vertex_buffer[i].stride =
- mgr->b.vertex_buffer[i].stride = vb->stride;
+ if (vb->stride) {
+ mgr->nonzero_stride_vb_mask |= 1 << i;
+ }
+
+ if (!vb->buffer) {
+ pipe_resource_reference(&real_vb->buffer, NULL);
+ continue;
+ }
- if (!vb->buffer ||
- mgr->incompatible_vb[i]) {
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+ if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
+ (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
+ mgr->incompatible_vb_mask |= 1 << i;
+ pipe_resource_reference(&real_vb->buffer, NULL);
continue;
}
- if (u_vbuf_resource(vb->buffer)->user_ptr) {
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
- mgr->any_user_vbs = TRUE;
+ if (!mgr->caps.user_vertex_buffers && vb->buffer->user_ptr) {
+ mgr->user_vb_mask |= 1 << i;
+ pipe_resource_reference(&real_vb->buffer, NULL);
continue;
}
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
+ pipe_resource_reference(&real_vb->buffer, vb->buffer);
}
- for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
- pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
+ for (i = count; i < mgr->nr_vertex_buffers; i++) {
+ pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
}
- for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
- pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+ for (i = count; i < mgr->nr_real_vertex_buffers; i++) {
+ pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
}
- mgr->b.nr_vertex_buffers = count;
- mgr->b.nr_real_vertex_buffers = count;
+ mgr->nr_vertex_buffers = count;
+ mgr->nr_real_vertex_buffers = count;
+ mgr->vertex_buffers_dirty = TRUE;
}
void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
const struct pipe_index_buffer *ib)
{
+ struct pipe_context *pipe = mgr->pipe;
+
if (ib && ib->buffer) {
assert(ib->offset % ib->index_size == 0);
pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
@@ -743,18 +767,20 @@ void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
} else {
pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
}
+
+ pipe->set_index_buffer(pipe, ib);
}
static void
-u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
+u_vbuf_upload_buffers(struct u_vbuf *mgr,
int start_vertex, unsigned num_vertices,
int start_instance, unsigned num_instances)
{
unsigned i;
unsigned nr_velems = mgr->ve->count;
- unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
+ unsigned nr_vbufs = mgr->nr_vertex_buffers;
struct pipe_vertex_element *velems =
- mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
+ mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve;
unsigned start_offset[PIPE_MAX_ATTRIBS];
unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};
@@ -762,7 +788,7 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
for (i = 0; i < nr_velems; i++) {
struct pipe_vertex_element *velem = &velems[i];
unsigned index = velem->vertex_buffer_index;
- struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
+ struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
unsigned instance_div, first, size;
/* Skip the buffers generated by translate. */
@@ -774,7 +800,7 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
assert(vb->buffer);
- if (!u_vbuf_resource(vb->buffer)->user_ptr) {
+ if (!vb->buffer->user_ptr) {
continue;
}
@@ -820,129 +846,35 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
start = start_offset[i];
assert(start < end);
- real_vb = &mgr->b.real_vertex_buffer[i];
- ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;
+ real_vb = &mgr->real_vertex_buffer[i];
+ ptr = mgr->vertex_buffer[i].buffer->user_ptr;
- u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
+ u_upload_data(mgr->uploader, start, end - start, ptr + start,
&real_vb->buffer_offset, &real_vb->buffer);
real_vb->buffer_offset -= start;
}
}
-unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
-{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
- unsigned i, nr = mgr->ve->count;
- struct pipe_vertex_element *velems =
- mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
- unsigned result = ~0;
-
- for (i = 0; i < nr; i++) {
- struct pipe_vertex_buffer *vb =
- &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
- unsigned size, max_count, value;
-
- /* We're not interested in constant and per-instance attribs. */
- if (!vb->buffer ||
- !vb->stride ||
- velems[i].instance_divisor) {
- continue;
- }
-
- size = vb->buffer->width0;
-
- /* Subtract buffer_offset. */
- value = vb->buffer_offset;
- if (value >= size) {
- return 0;
- }
- size -= value;
-
- /* Subtract src_offset. */
- value = velems[i].src_offset;
- if (value >= size) {
- return 0;
- }
- size -= value;
-
- /* Subtract format_size. */
- value = mgr->ve->native_format_size[i];
- if (value >= size) {
- return 0;
- }
- size -= value;
-
- /* Compute the max count. */
- max_count = 1 + size / vb->stride;
- result = MIN2(result, max_count);
- }
- return result;
-}
-
-static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
+static boolean u_vbuf_need_minmax_index(struct u_vbuf *mgr)
{
- unsigned i, nr = mgr->ve->count;
-
- for (i = 0; i < nr; i++) {
- struct pipe_vertex_buffer *vb;
- unsigned index;
-
- /* Per-instance attribs don't need min/max_index. */
- if (mgr->ve->ve[i].instance_divisor) {
- continue;
- }
-
- index = mgr->ve->ve[i].vertex_buffer_index;
- vb = &mgr->b.vertex_buffer[index];
-
- /* Constant attribs don't need min/max_index. */
- if (!vb->stride) {
- continue;
- }
-
- /* Per-vertex attribs need min/max_index. */
- if (u_vbuf_resource(vb->buffer)->user_ptr ||
- mgr->ve->incompatible_layout_elem[i] ||
- mgr->incompatible_vb[index]) {
- return TRUE;
- }
- }
-
- return FALSE;
+ /* See if there are any per-vertex attribs which will be uploaded or
+ * translated. Use bitmasks to get the info instead of looping over vertex
+ * elements. */
+ return ((mgr->user_vb_mask | mgr->incompatible_vb_mask |
+ mgr->ve->incompatible_vb_mask_any) &
+ mgr->ve->noninstance_vb_mask_any & mgr->nonzero_stride_vb_mask) != 0;
}
-static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf_priv *mgr)
+static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf *mgr)
{
- unsigned i, nr = mgr->ve->count;
-
- for (i = 0; i < nr; i++) {
- struct pipe_vertex_buffer *vb;
- unsigned index;
-
- /* Per-instance attribs are not per-vertex data. */
- if (mgr->ve->ve[i].instance_divisor) {
- continue;
- }
-
- index = mgr->ve->ve[i].vertex_buffer_index;
- vb = &mgr->b.vertex_buffer[index];
-
- /* Constant attribs are not per-vertex data. */
- if (!vb->stride) {
- continue;
- }
-
- /* Return true for the hw buffers which don't need to be translated. */
- /* XXX we could use some kind of a is-busy query. */
- if (!u_vbuf_resource(vb->buffer)->user_ptr &&
- !mgr->ve->incompatible_layout_elem[i] &&
- !mgr->incompatible_vb[index]) {
- return TRUE;
- }
- }
-
- return FALSE;
+ /* Return true if there are hw buffers which don't need to be translated.
+ *
+ * We could query whether each buffer is busy, but that would
+ * be way more costly than this. */
+ return (~mgr->user_vb_mask & ~mgr->incompatible_vb_mask &
+ mgr->ve->compatible_vb_mask_all & mgr->ve->noninstance_vb_mask_any &
+ mgr->nonzero_stride_vb_mask) != 0;
}
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
@@ -956,8 +888,8 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
unsigned i;
unsigned restart_index = info->restart_index;
- if (u_vbuf_resource(ib->buffer)->user_ptr) {
- indices = u_vbuf_resource(ib->buffer)->user_ptr +
+ if (ib->buffer->user_ptr) {
+ indices = ib->buffer->user_ptr +
ib->offset + info->start * ib->index_size;
} else {
indices = pipe_buffer_map_range(pipe, ib->buffer,
@@ -1044,38 +976,42 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
}
}
-enum u_vbuf_return_flags
-u_vbuf_draw_begin(struct u_vbuf *mgrb,
- struct pipe_draw_info *info)
+void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
+ struct pipe_context *pipe = mgr->pipe;
int start_vertex, min_index;
unsigned num_vertices;
- bool unroll_indices = false;
+ boolean unroll_indices = FALSE;
+ uint32_t user_vb_mask = mgr->user_vb_mask;
+
+ /* Normal draw. No fallback and no user buffers. */
+ if (!mgr->incompatible_vb_mask &&
+ !mgr->ve->incompatible_elem_mask &&
+ !user_vb_mask) {
+ /* Set vertex buffers if needed. */
+ if (mgr->vertex_buffers_dirty) {
+ pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
+ mgr->real_vertex_buffer);
+ mgr->vertex_buffers_dirty = FALSE;
+ }
- if (!mgr->incompatible_vb_layout &&
- !mgr->ve->incompatible_layout &&
- !mgr->any_user_vbs) {
- return 0;
+ pipe->draw_vbo(pipe, info);
+ return;
}
if (info->indexed) {
- int max_index;
- bool index_bounds_valid = false;
-
- if (info->max_index != ~0) {
- min_index = info->min_index;
- max_index = info->max_index;
- index_bounds_valid = true;
- } else if (u_vbuf_need_minmax_index(mgr)) {
- u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
- &min_index, &max_index);
- index_bounds_valid = true;
- }
+ /* See if anything needs to be done for per-vertex attribs. */
+ if (u_vbuf_need_minmax_index(mgr)) {
+ int max_index;
+
+ if (info->max_index != ~0) {
+ min_index = info->min_index;
+ max_index = info->max_index;
+ } else {
+ u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, info,
+ &min_index, &max_index);
+ }
- /* If the index bounds are valid, it means some upload or translation
- * of per-vertex attribs will be performed. */
- if (index_bounds_valid) {
assert(min_index <= max_index);
start_vertex = min_index + info->index_bias;
@@ -1090,7 +1026,9 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb,
num_vertices-info->count > 32 &&
!u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
/*printf("num_vertices=%i count=%i\n", num_vertices, info->count);*/
- unroll_indices = true;
+ unroll_indices = TRUE;
+ user_vb_mask &= ~(mgr->nonzero_stride_vb_mask &
+ mgr->ve->noninstance_vb_mask_any);
}
} else {
/* Nothing to do for per-vertex attribs. */
@@ -1106,17 +1044,20 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb,
/* Translate vertices with non-native layouts or formats. */
if (unroll_indices ||
- mgr->incompatible_vb_layout ||
- mgr->ve->incompatible_layout) {
+ mgr->incompatible_vb_mask ||
+ mgr->ve->incompatible_elem_mask) {
/* XXX check the return value */
u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
info->start_instance, info->instance_count,
info->start, info->count, min_index,
unroll_indices);
+
+ user_vb_mask &= ~(mgr->incompatible_vb_mask |
+ mgr->ve->incompatible_vb_mask_all);
}
/* Upload user buffers. */
- if (mgr->any_user_vbs) {
+ if (user_vb_mask) {
u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
info->start_instance, info->instance_count);
}
@@ -1130,34 +1071,75 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb,
}
unsigned i;
- for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
+ for (i = 0; i < mgr->nr_vertex_buffers; i++) {
printf("input %i: ", i);
- util_dump_vertex_buffer(stdout, mgr->b.vertex_buffer+i);
+ util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
printf("\n");
}
- for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
+ for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
printf("real %i: ", i);
- util_dump_vertex_buffer(stdout, mgr->b.real_vertex_buffer+i);
+ util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
printf("\n");
}
*/
- if (unroll_indices) {
- info->indexed = FALSE;
- info->index_bias = 0;
- info->min_index = 0;
- info->max_index = info->count - 1;
- info->start = 0;
+ u_upload_unmap(mgr->uploader);
+ pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
+ mgr->real_vertex_buffer);
+
+ if (unlikely(unroll_indices)) {
+ struct pipe_draw_info new_info = *info;
+ new_info.indexed = FALSE;
+ new_info.index_bias = 0;
+ new_info.min_index = 0;
+ new_info.max_index = info->count - 1;
+ new_info.start = 0;
+
+ pipe->draw_vbo(pipe, &new_info);
+ } else {
+ pipe->draw_vbo(pipe, info);
+ }
+
+ if (mgr->using_translate) {
+ u_vbuf_translate_end(mgr);
}
+ mgr->vertex_buffers_dirty = TRUE;
+}
- return U_VBUF_BUFFERS_UPDATED;
+void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
+{
+ assert(!mgr->ve_saved);
+ mgr->ve_saved = mgr->ve;
}
-void u_vbuf_draw_end(struct u_vbuf *mgrb)
+void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
{
- struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
+ if (mgr->ve != mgr->ve_saved) {
+ struct pipe_context *pipe = mgr->pipe;
- if (mgr->fallback_ve) {
- u_vbuf_translate_end(mgr);
+ mgr->ve = mgr->ve_saved;
+ pipe->bind_vertex_elements_state(pipe,
+ mgr->ve ? mgr->ve->driver_cso : NULL);
+ }
+ mgr->ve_saved = NULL;
+}
+
+void u_vbuf_save_vertex_buffers(struct u_vbuf *mgr)
+{
+ util_copy_vertex_buffers(mgr->vertex_buffer_saved,
+ &mgr->nr_vertex_buffers_saved,
+ mgr->vertex_buffer,
+ mgr->nr_vertex_buffers);
+}
+
+void u_vbuf_restore_vertex_buffers(struct u_vbuf *mgr)
+{
+ unsigned i;
+
+ u_vbuf_set_vertex_buffers(mgr, mgr->nr_vertex_buffers_saved,
+ mgr->vertex_buffer_saved);
+ for (i = 0; i < mgr->nr_vertex_buffers_saved; i++) {
+ pipe_resource_reference(&mgr->vertex_buffer_saved[i].buffer, NULL);
}
+ mgr->nr_vertex_buffers_saved = 0;
}
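
The rewritten u_vbuf.c above replaces per-element boolean arrays with per-buffer bitmasks (incompatible_elem_mask, compatible/incompatible_vb_mask_any/all, noninstance_vb_mask_any) and reduces checks such as u_vbuf_need_minmax_index to a single mask expression. The following standalone sketch (hypothetical element layout and mask values, not part of the patch) mirrors that derivation so the bitwise logic can be verified in isolation:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-element description: which vertex buffer the element reads
 * and whether its format/offset is natively supported (mirrors the checks in
 * u_vbuf_create_vertex_elements). */
struct elem { unsigned vb_index; int compatible; int instance_divisor; };

int main(void)
{
   /* Three elements over two buffers; element 1 needs a format fallback. */
   struct elem ve[] = { {0, 1, 0}, {1, 0, 0}, {1, 1, 1} };
   unsigned count = 3, i;

   uint32_t used_buffers = 0, incompatible_elem_mask = 0;
   uint32_t compatible_vb_mask_any = 0, incompatible_vb_mask_any = 0;
   uint32_t noninstance_vb_mask_any = 0;

   for (i = 0; i < count; i++) {
      used_buffers |= 1u << ve[i].vb_index;
      if (!ve[i].instance_divisor)
         noninstance_vb_mask_any |= 1u << ve[i].vb_index;
      if (!ve[i].compatible) {
         incompatible_elem_mask |= 1u << i;
         incompatible_vb_mask_any |= 1u << ve[i].vb_index;
      } else {
         compatible_vb_mask_any |= 1u << ve[i].vb_index;
      }
   }

   /* Same derivation as the patch: each "all" mask is the complement of the
    * opposite "any" mask, restricted to buffers actually referenced. */
   uint32_t compatible_vb_mask_all   = ~incompatible_vb_mask_any & used_buffers;
   uint32_t incompatible_vb_mask_all = ~compatible_vb_mask_any & used_buffers;

   /* Per-draw state (hypothetical values). */
   uint32_t user_vb_mask = 0x1;           /* buffer 0 is a user pointer */
   uint32_t incompatible_vb_mask = 0x0;   /* no unaligned buffer offsets/strides */
   uint32_t nonzero_stride_vb_mask = 0x3; /* both buffers hold per-vertex data */

   /* u_vbuf_need_minmax_index: any per-vertex attrib that must be uploaded
    * or translated forces an index-range scan. */
   int need_minmax =
      ((user_vb_mask | incompatible_vb_mask | incompatible_vb_mask_any) &
       noninstance_vb_mask_any & nonzero_stride_vb_mask) != 0;

   printf("elem_mask=0x%x compatible_all=0x%x incompatible_all=0x%x need_minmax=%d\n",
          incompatible_elem_mask, compatible_vb_mask_all,
          incompatible_vb_mask_all, need_minmax);
   return 0;
}
```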
diff --git a/mesalib/src/gallium/auxiliary/util/u_vbuf.h b/mesalib/src/gallium/auxiliary/util/u_vbuf.h
index 3669c9b87..59eb59a11 100644
--- a/mesalib/src/gallium/auxiliary/util/u_vbuf.h
+++ b/mesalib/src/gallium/auxiliary/util/u_vbuf.h
@@ -25,8 +25,8 @@
*
**************************************************************************/
-#ifndef U_VBUF_MGR_H
-#define U_VBUF_MGR_H
+#ifndef U_VBUF_H
+#define U_VBUF_H
/* This module builds upon u_upload_mgr and translate_cache and takes care of
* user buffer uploads and vertex format fallbacks. It's designed
@@ -35,7 +35,9 @@
#include "pipe/p_context.h"
#include "pipe/p_state.h"
-#include "util/u_transfer.h"
+
+struct cso_context;
+struct u_vbuf;
/* Hardware vertex fetcher limitations can be described by this structure. */
struct u_vbuf_caps {
@@ -47,99 +49,38 @@ struct u_vbuf_caps {
unsigned format_norm32:1; /* PIPE_FORMAT_*32*NORM */
unsigned format_scaled32:1; /* PIPE_FORMAT_*32*SCALED */
- /* Whether vertex fetches don't have to be dword-aligned. */
+ /* Whether vertex fetches don't have to be 4-byte-aligned. */
/* TRUE if hardware supports it. */
- unsigned fetch_dword_unaligned:1;
-};
-
-/* The manager.
- * This structure should also be used to access vertex buffers
- * from a driver. */
-struct u_vbuf {
- /* This is what was set in set_vertex_buffers.
- * May contain user buffers. */
- struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- unsigned nr_vertex_buffers;
-
- /* Contains only real vertex buffers.
- * Hardware drivers should use real_vertex_buffers[i]
- * instead of vertex_buffers[i].buffer. */
- struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
- int nr_real_vertex_buffers;
-
- /* The index buffer. */
- struct pipe_index_buffer index_buffer;
-
- /* This uploader can optionally be used by the driver.
- *
- * Allowed functions:
- * - u_upload_alloc
- * - u_upload_data
- * - u_upload_buffer
- * - u_upload_flush */
- struct u_upload_mgr *uploader;
-
- struct u_vbuf_caps caps;
-};
-
-struct u_vbuf_resource {
- struct u_resource b;
- uint8_t *user_ptr;
-};
+ unsigned buffer_offset_unaligned:1;
+ unsigned buffer_stride_unaligned:1;
+ unsigned velem_src_offset_unaligned:1;
-/* Opaque type containing information about vertex elements for the manager. */
-struct u_vbuf_elements;
-
-enum u_fetch_alignment {
- U_VERTEX_FETCH_BYTE_ALIGNED,
- U_VERTEX_FETCH_DWORD_ALIGNED
+ /* Whether the driver supports user vertex buffers. */
+ unsigned user_vertex_buffers:1;
};
-enum u_vbuf_return_flags {
- U_VBUF_BUFFERS_UPDATED = 1
-};
+void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps);
struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
- unsigned upload_buffer_size,
- unsigned upload_buffer_alignment,
- unsigned upload_buffer_bind,
- enum u_fetch_alignment fetch_alignment);
+ struct u_vbuf_caps *caps);
void u_vbuf_destroy(struct u_vbuf *mgr);
-struct u_vbuf_elements *
-u_vbuf_create_vertex_elements(struct u_vbuf *mgr,
- unsigned count,
- const struct pipe_vertex_element *attrs,
- struct pipe_vertex_element *native_attrs);
-
-void u_vbuf_bind_vertex_elements(struct u_vbuf *mgr,
- void *cso,
- struct u_vbuf_elements *ve);
-
-void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
- struct u_vbuf_elements *ve);
-
-void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
- unsigned count,
+/* State and draw functions. */
+void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
+ const struct pipe_vertex_element *states);
+void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, unsigned count,
const struct pipe_vertex_buffer *bufs);
-
void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
const struct pipe_index_buffer *ib);
+void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info);
-enum u_vbuf_return_flags u_vbuf_draw_begin(struct u_vbuf *mgr,
- struct pipe_draw_info *info);
-
-unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgr);
-
-void u_vbuf_draw_end(struct u_vbuf *mgr);
-
-
-static INLINE struct u_vbuf_resource *u_vbuf_resource(struct pipe_resource *r)
-{
- return (struct u_vbuf_resource*)r;
-}
+/* Save/restore functionality. */
+void u_vbuf_save_vertex_elements(struct u_vbuf *mgr);
+void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr);
+void u_vbuf_save_vertex_buffers(struct u_vbuf *mgr);
+void u_vbuf_restore_vertex_buffers(struct u_vbuf *mgr);
#endif
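
The slimmed-down header above exposes only an opaque struct u_vbuf plus caps query, state-setting and draw entry points. A minimal sketch of how a state tracker would wire it up, assuming the Mesa tree at this commit (util/u_vbuf.h, cso_cache/cso_context.h) and a pipe_context/cso_context created by the caller; it mirrors the st_init_vbuf hunk in st_context.c further down and is illustrative only:

```c
/* Sketch, not part of the patch: create u_vbuf only when the driver lacks
 * some vertex-fetch capability, then let the CSO context route draws
 * through it (cso_install_vbuf is the hook used by this commit). */
#include "util/u_vbuf.h"
#include "cso_cache/cso_context.h"

static struct u_vbuf *
init_vbuf_if_needed(struct pipe_context *pipe, struct cso_context *cso)
{
   struct u_vbuf_caps caps;
   struct u_vbuf *vbuf = NULL;

   u_vbuf_get_caps(pipe->screen, &caps);

   if (!caps.buffer_offset_unaligned ||
       !caps.buffer_stride_unaligned ||
       !caps.velem_src_offset_unaligned ||
       !caps.user_vertex_buffers) {
      vbuf = u_vbuf_create(pipe, &caps);
      cso_install_vbuf(cso, vbuf);
   }
   return vbuf;   /* destroy later with u_vbuf_destroy() */
}
```

With u_vbuf installed, the state tracker keeps calling the cso_* vertex-buffer, vertex-element and draw functions; format fallbacks and user-buffer uploads happen transparently inside u_vbuf_draw_vbo.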
diff --git a/mesalib/src/glsl/Makefile b/mesalib/src/glsl/Makefile
index f6c722997..7c80c95d8 100644
--- a/mesalib/src/glsl/Makefile
+++ b/mesalib/src/glsl/Makefile
@@ -165,7 +165,7 @@ glcpp/glcpp-parse.c: glcpp/glcpp-parse.y
builtin_compiler: $(GLSL2_OBJECTS) $(OBJECTS) builtin_stubs.o
$(APP_CXX) $(INCLUDES) $(CXXFLAGS) $(LDFLAGS) $(OBJECTS) $(GLSL2_OBJECTS) builtin_stubs.o -o $@
-builtin_function.cpp: builtins/profiles/* builtins/ir/* builtins/tools/generate_builtins.py builtins/tools/texture_builtins.py builtin_compiler
+builtin_function.cpp: builtins/profiles/* builtins/ir/* builtins/glsl/* builtins/tools/generate_builtins.py builtins/tools/texture_builtins.py builtin_compiler
@echo Regenerating builtin_function.cpp...
$(PYTHON2) $(PYTHON_FLAGS) builtins/tools/generate_builtins.py ./builtin_compiler > builtin_function.cpp || rm -f builtin_function.cpp
diff --git a/mesalib/src/glsl/ir_reader.cpp b/mesalib/src/glsl/ir_reader.cpp
index 7ce683ef7..03dbb67c3 100644
--- a/mesalib/src/glsl/ir_reader.cpp
+++ b/mesalib/src/glsl/ir_reader.cpp
@@ -407,6 +407,8 @@ ir_reader::read_declaration(s_expression *expr)
var->mode = ir_var_out;
} else if (strcmp(qualifier->value(), "inout") == 0) {
var->mode = ir_var_inout;
+ } else if (strcmp(qualifier->value(), "temporary") == 0) {
+ var->mode = ir_var_temporary;
} else if (strcmp(qualifier->value(), "smooth") == 0) {
var->interpolation = INTERP_QUALIFIER_SMOOTH;
} else if (strcmp(qualifier->value(), "flat") == 0) {
diff --git a/mesalib/src/glsl/main.cpp b/mesalib/src/glsl/main.cpp
index 8c651b8c0..fbcc4aebb 100644
--- a/mesalib/src/glsl/main.cpp
+++ b/mesalib/src/glsl/main.cpp
@@ -33,6 +33,15 @@
#define O_RDONLY _O_RDONLY
#endif
+/** @file main.cpp
+ *
+ * This file is the main() routine and scaffolding for producing
+ * builtin_compiler (which doesn't include builtins itself and is used
+ * to generate the profile information for builtin_function.cpp), and
+ * for glsl_compiler (which does include builtins and can be used to
+ * offline compile GLSL code and examine the resulting GLSL IR.
+ */
+
#include "ast.h"
#include "glsl_parser_extras.h"
#include "ir_optimization.h"
diff --git a/mesalib/src/glsl/opt_dead_functions.cpp b/mesalib/src/glsl/opt_dead_functions.cpp
index 017a28639..f50349395 100644
--- a/mesalib/src/glsl/opt_dead_functions.cpp
+++ b/mesalib/src/glsl/opt_dead_functions.cpp
@@ -62,8 +62,6 @@ public:
signature_entry *get_signature_entry(ir_function_signature *var);
- bool (*predicate)(ir_instruction *ir);
-
/* List of signature_entry */
exec_list signature_list;
void *mem_ctx;
diff --git a/mesalib/src/glsl/opt_structure_splitting.cpp b/mesalib/src/glsl/opt_structure_splitting.cpp
index 6dd228e4e..a21238dd0 100644
--- a/mesalib/src/glsl/opt_structure_splitting.cpp
+++ b/mesalib/src/glsl/opt_structure_splitting.cpp
@@ -201,7 +201,6 @@ public:
variable_entry2 *get_splitting_entry(ir_variable *var);
exec_list *variable_list;
- void *mem_ctx;
};
variable_entry2 *
diff --git a/mesalib/src/mesa/main/version.c b/mesalib/src/mesa/main/version.c
index 5b8b65ddf..09a930cf2 100644
--- a/mesalib/src/mesa/main/version.c
+++ b/mesalib/src/mesa/main/version.c
@@ -124,6 +124,7 @@ compute_version(struct gl_context *ctx)
ctx->Extensions.EXT_texture_sRGB);
const GLboolean ver_3_0 = (ver_2_1 &&
ctx->Const.GLSLVersion >= 130 &&
+ ctx->Const.MaxSamples >= 4 &&
ctx->Extensions.ARB_color_buffer_float &&
ctx->Extensions.ARB_depth_buffer_float &&
ctx->Extensions.ARB_half_float_pixel &&
diff --git a/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c b/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c
index 5e078a85e..9a3f22465 100644
--- a/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c
+++ b/mesalib/src/mesa/state_tracker/st_cb_drawpixels.c
@@ -1509,7 +1509,15 @@ st_CopyPixels(struct gl_context *ctx, GLint srcx, GLint srcy,
readY = srcy;
readW = width;
readH = height;
- _mesa_clip_readpixels(ctx, &readX, &readY, &readW, &readH, &pack);
+ if (!_mesa_clip_readpixels(ctx, &readX, &readY, &readW, &readH, &pack)) {
+ /* The source region is completely out of bounds. Do nothing.
+ * The GL spec says "Results of copies from outside the window,
+ * or from regions of the window that are not exposed, are
+ * hardware dependent and undefined."
+ */
+ return;
+ }
+
readW = MAX2(0, readW);
readH = MAX2(0, readH);
diff --git a/mesalib/src/mesa/state_tracker/st_context.c b/mesalib/src/mesa/state_tracker/st_context.c
index 0245fd92b..19d9da131 100644
--- a/mesalib/src/mesa/state_tracker/st_context.c
+++ b/mesalib/src/mesa/state_tracker/st_context.c
@@ -65,6 +65,7 @@
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"
#include "cso_cache/cso_context.h"
+#include "util/u_vbuf.h"
DEBUG_GET_ONCE_BOOL_OPTION(mesa_mvp_dp4, "MESA_MVP_DP4", FALSE)
@@ -111,6 +112,29 @@ st_get_msaa(void)
}
+static void st_init_vbuf(struct st_context *st)
+{
+ struct u_vbuf_caps caps;
+
+ u_vbuf_get_caps(st->pipe->screen, &caps);
+
+ /* Create u_vbuf if there is anything unsupported. */
+ if (!caps.buffer_offset_unaligned ||
+ !caps.buffer_stride_unaligned ||
+ !caps.velem_src_offset_unaligned ||
+ !caps.format_fixed32 ||
+ !caps.format_float16 ||
+ !caps.format_float64 ||
+ !caps.format_norm32 ||
+ !caps.format_scaled32 ||
+ !caps.user_vertex_buffers) {
+ /* XXX user vertex buffers are always uploaded regardless of the CAP. */
+ st->vbuf = u_vbuf_create(st->pipe, &caps);
+ cso_install_vbuf(st->cso_context, st->vbuf);
+ }
+}
+
+
static struct st_context *
st_create_context_priv( struct gl_context *ctx, struct pipe_context *pipe )
{
@@ -134,6 +158,7 @@ st_create_context_priv( struct gl_context *ctx, struct pipe_context *pipe )
st->uploader = u_upload_create(st->pipe, 65536, 4, PIPE_BIND_VERTEX_BUFFER);
st->cso_context = cso_create_context(pipe);
+ st_init_vbuf(st);
st_init_atoms( st );
st_init_bitmap(st);
st_init_clear(st);
@@ -245,6 +270,7 @@ static void st_destroy_context_priv( struct st_context *st )
void st_destroy_context( struct st_context *st )
{
struct pipe_context *pipe = st->pipe;
+ struct u_vbuf *vbuf = st->vbuf;
struct cso_context *cso = st->cso_context;
struct gl_context *ctx = st->ctx;
GLuint i;
@@ -275,7 +301,13 @@ void st_destroy_context( struct st_context *st )
_mesa_free_context_data(ctx);
+ /* This will free the st_context too, so 'st' must not be accessed
+ * afterwards. */
st_destroy_context_priv(st);
+ st = NULL;
+
+ if (vbuf)
+ u_vbuf_destroy(vbuf);
cso_destroy_context(cso);
diff --git a/mesalib/src/mesa/state_tracker/st_context.h b/mesalib/src/mesa/state_tracker/st_context.h
index e3d65d97a..3ec98ada1 100644
--- a/mesalib/src/mesa/state_tracker/st_context.h
+++ b/mesalib/src/mesa/state_tracker/st_context.h
@@ -41,6 +41,7 @@ struct gen_mipmap_state;
struct st_context;
struct st_fragment_program;
struct u_upload_mgr;
+struct u_vbuf;
#define ST_NEW_MESA (1 << 0) /* Mesa state has changed */
@@ -73,6 +74,8 @@ struct st_context
struct pipe_context *pipe;
struct u_upload_mgr *uploader;
+ struct u_vbuf *vbuf;
+
struct draw_context *draw; /**< For selection/feedback/rastpos only */
struct draw_stage *feedback_stage; /**< For GL_FEEDBACK rendermode */
struct draw_stage *selection_stage; /**< For GL_SELECT rendermode */
diff --git a/mesalib/src/mesa/state_tracker/st_draw.c b/mesalib/src/mesa/state_tracker/st_draw.c
index 0a35ab2be..edab76bf5 100644
--- a/mesalib/src/mesa/state_tracker/st_draw.c
+++ b/mesalib/src/mesa/state_tracker/st_draw.c
@@ -795,7 +795,8 @@ find_sub_primitives(const void *elements, unsigned element_size,
* sub-primitives.
*/
static void
-handle_fallback_primitive_restart(struct pipe_context *pipe,
+handle_fallback_primitive_restart(struct cso_context *cso,
+ struct pipe_context *pipe,
const struct _mesa_index_buffer *ib,
struct pipe_index_buffer *ibuffer,
struct pipe_draw_info *orig_info)
@@ -851,7 +852,7 @@ handle_fallback_primitive_restart(struct pipe_context *pipe,
info.start = sub_prims[i].start;
info.count = sub_prims[i].count;
if (u_trim_pipe_prim(info.mode, &info.count)) {
- pipe->draw_vbo(pipe, &info);
+ cso_draw_vbo(cso, &info);
}
}
}
@@ -1075,7 +1076,7 @@ st_draw_vbo(struct gl_context *ctx,
}
setup_index_buffer(ctx, ib, &ibuffer);
- pipe->set_index_buffer(pipe, &ibuffer);
+ cso_set_index_buffer(st->cso_context, &ibuffer);
util_draw_init_info(&info);
if (ib) {
@@ -1110,20 +1111,21 @@ st_draw_vbo(struct gl_context *ctx,
}
if (info.count_from_stream_output) {
- pipe->draw_vbo(pipe, &info);
+ cso_draw_vbo(st->cso_context, &info);
}
else if (info.primitive_restart) {
if (st->sw_primitive_restart) {
         /* Handle primitive restart for drivers that don't support it */
- handle_fallback_primitive_restart(pipe, ib, &ibuffer, &info);
+ handle_fallback_primitive_restart(st->cso_context, pipe, ib,
+ &ibuffer, &info);
}
else {
/* don't trim, restarts might be inside index list */
- pipe->draw_vbo(pipe, &info);
+ cso_draw_vbo(st->cso_context, &info);
}
}
else if (u_trim_pipe_prim(info.mode, &info.count))
- pipe->draw_vbo(pipe, &info);
+ cso_draw_vbo(st->cso_context, &info);
}
pipe_resource_reference(&ibuffer.buffer, NULL);
diff --git a/mesalib/src/mesa/state_tracker/st_extensions.c b/mesalib/src/mesa/state_tracker/st_extensions.c
index 34e0329be..1b4bca681 100644
--- a/mesalib/src/mesa/state_tracker/st_extensions.c
+++ b/mesalib/src/mesa/state_tracker/st_extensions.c
@@ -459,12 +459,6 @@ void st_init_extensions(struct st_context *st)
/* Required: vertex fetch support. */
static const struct st_extension_format_mapping vertex_mapping[] = {
- { { o(ARB_ES2_compatibility) },
- { PIPE_FORMAT_R32G32B32A32_FIXED } },
-
- { { o(ARB_half_float_vertex) },
- { PIPE_FORMAT_R16G16B16A16_FLOAT } },
-
{ { o(ARB_vertex_type_2_10_10_10_rev) },
{ PIPE_FORMAT_R10G10B10A2_UNORM,
PIPE_FORMAT_B10G10R10A2_UNORM,
@@ -479,6 +473,7 @@ void st_init_extensions(struct st_context *st)
/*
* Extensions that are supported by all Gallium drivers:
*/
+ ctx->Extensions.ARB_ES2_compatibility = GL_TRUE;
ctx->Extensions.ARB_copy_buffer = GL_TRUE;
ctx->Extensions.ARB_draw_elements_base_vertex = GL_TRUE;
ctx->Extensions.ARB_explicit_attrib_location = GL_TRUE;
@@ -486,6 +481,7 @@ void st_init_extensions(struct st_context *st)
ctx->Extensions.ARB_fragment_program = GL_TRUE;
ctx->Extensions.ARB_fragment_shader = GL_TRUE;
ctx->Extensions.ARB_half_float_pixel = GL_TRUE;
+ ctx->Extensions.ARB_half_float_vertex = GL_TRUE;
ctx->Extensions.ARB_map_buffer_range = GL_TRUE;
ctx->Extensions.ARB_sampler_objects = GL_TRUE;
ctx->Extensions.ARB_shader_objects = GL_TRUE;
diff --git a/mesalib/src/mesa/vbo/vbo_exec.h b/mesalib/src/mesa/vbo/vbo_exec.h
index 5cdf5ced9..be9f3d78d 100644
--- a/mesalib/src/mesa/vbo/vbo_exec.h
+++ b/mesalib/src/mesa/vbo/vbo_exec.h
@@ -189,7 +189,8 @@ static inline void
vbo_draw_method(struct vbo_exec_context *exec, enum draw_method method)
{
if (exec->last_draw_method != method) {
- exec->ctx->NewState |= _NEW_ARRAY;
+ struct gl_context *ctx = exec->ctx;
+ ctx->Driver.UpdateState(ctx, _NEW_ARRAY);
exec->last_draw_method = method;
}
}
diff --git a/mesalib/src/mesa/vbo/vbo_exec_array.c b/mesalib/src/mesa/vbo/vbo_exec_array.c
index f95e062fb..2dcfb8e5b 100644
--- a/mesalib/src/mesa/vbo/vbo_exec_array.c
+++ b/mesalib/src/mesa/vbo/vbo_exec_array.c
@@ -438,14 +438,6 @@ recalculate_input_bindings(struct gl_context *ctx)
inputs[VERT_ATTRIB_GENERIC(i)] = &vbo->currval[VBO_ATTRIB_GENERIC0+i];
const_inputs |= VERT_BIT_GENERIC(i);
}
-
- /* There is no need to make _NEW_ARRAY dirty here for the TnL program,
- * because it already takes care of invalidating the state necessary
- * to revalidate vertex arrays. Not marking the state as dirty also
- * improves performance (quite significantly in some apps).
- */
- if (!ctx->VertexProgram._MaintainTnlProgram)
- ctx->NewState |= _NEW_ARRAY;
break;
case VP_NV:
@@ -472,8 +464,6 @@ recalculate_input_bindings(struct gl_context *ctx)
inputs[VERT_ATTRIB_GENERIC(i)] = &vbo->currval[VBO_ATTRIB_GENERIC0+i];
const_inputs |= VERT_BIT_GENERIC(i);
}
-
- ctx->NewState |= _NEW_ARRAY;
break;
case VP_ARB:
@@ -512,11 +502,11 @@ recalculate_input_bindings(struct gl_context *ctx)
}
inputs[VERT_ATTRIB_GENERIC0] = inputs[0];
- ctx->NewState |= _NEW_ARRAY;
break;
}
_mesa_set_varying_vp_inputs( ctx, VERT_BIT_ALL & (~const_inputs) );
+ ctx->Driver.UpdateState(ctx, _NEW_ARRAY);
}
@@ -640,11 +630,11 @@ vbo_exec_DrawArrays(GLenum mode, GLint start, GLsizei count)
_mesa_debug(ctx, "glDrawArrays(%s, %d, %d)\n",
_mesa_lookup_enum_by_nr(mode), start, count);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawArrays( ctx, mode, start, count ))
return;
- FLUSH_CURRENT( ctx, 0 );
-
if (0)
check_draw_arrays_data(ctx, start, count);
@@ -669,11 +659,11 @@ vbo_exec_DrawArraysInstanced(GLenum mode, GLint start, GLsizei count,
_mesa_debug(ctx, "glDrawArraysInstanced(%s, %d, %d, %d)\n",
_mesa_lookup_enum_by_nr(mode), start, count, numInstances);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawArraysInstanced(ctx, mode, start, count, numInstances))
return;
- FLUSH_CURRENT( ctx, 0 );
-
if (0)
check_draw_arrays_data(ctx, start, count);
@@ -761,8 +751,6 @@ vbo_validated_drawrangeelements(struct gl_context *ctx, GLenum mode,
struct _mesa_index_buffer ib;
struct _mesa_prim prim[1];
- FLUSH_CURRENT( ctx, 0 );
-
vbo_bind_arrays(ctx);
ib.count = count;
@@ -838,6 +826,8 @@ vbo_exec_DrawRangeElementsBaseVertex(GLenum mode,
_mesa_lookup_enum_by_nr(mode), start, end, count,
_mesa_lookup_enum_by_nr(type), indices, basevertex);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawRangeElements( ctx, mode, start, end, count,
type, indices, basevertex ))
return;
@@ -936,6 +926,8 @@ vbo_exec_DrawElements(GLenum mode, GLsizei count, GLenum type,
_mesa_lookup_enum_by_nr(mode), count,
_mesa_lookup_enum_by_nr(type), indices);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawElements( ctx, mode, count, type, indices, 0 ))
return;
@@ -958,6 +950,8 @@ vbo_exec_DrawElementsBaseVertex(GLenum mode, GLsizei count, GLenum type,
_mesa_lookup_enum_by_nr(mode), count,
_mesa_lookup_enum_by_nr(type), indices, basevertex);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawElements( ctx, mode, count, type, indices,
basevertex ))
return;
@@ -981,6 +975,8 @@ vbo_exec_DrawElementsInstanced(GLenum mode, GLsizei count, GLenum type,
_mesa_lookup_enum_by_nr(mode), count,
_mesa_lookup_enum_by_nr(type), indices, numInstances);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawElementsInstanced(ctx, mode, count, type, indices,
numInstances, 0))
return;
@@ -1005,6 +1001,8 @@ vbo_exec_DrawElementsInstancedBaseVertex(GLenum mode, GLsizei count, GLenum type
_mesa_lookup_enum_by_nr(type), indices,
numInstances, basevertex);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawElementsInstanced(ctx, mode, count, type, indices,
numInstances, basevertex))
return;
@@ -1037,8 +1035,6 @@ vbo_validated_multidrawelements(struct gl_context *ctx, GLenum mode,
if (primcount == 0)
return;
- FLUSH_CURRENT( ctx, 0 );
-
prim = calloc(1, primcount * sizeof(*prim));
if (prim == NULL) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glMultiDrawElements");
@@ -1226,12 +1222,12 @@ vbo_exec_DrawTransformFeedback(GLenum mode, GLuint name)
_mesa_debug(ctx, "glDrawTransformFeedback(%s, %d)\n",
_mesa_lookup_enum_by_nr(mode), name);
+ FLUSH_CURRENT(ctx, 0);
+
if (!_mesa_validate_DrawTransformFeedback(ctx, mode, obj)) {
return;
}
- FLUSH_CURRENT(ctx, 0);
-
vbo_draw_transform_feedback(ctx, mode, obj, 1);
}
diff --git a/mesalib/src/mesa/vbo/vbo_exec_draw.c b/mesalib/src/mesa/vbo/vbo_exec_draw.c
index 5cc6586b3..da5ca695e 100644
--- a/mesalib/src/mesa/vbo/vbo_exec_draw.c
+++ b/mesalib/src/mesa/vbo/vbo_exec_draw.c
@@ -253,11 +253,11 @@ vbo_exec_bind_arrays( struct gl_context *ctx )
arrays[attr]._MaxElement = count; /* ??? */
varying_inputs |= VERT_BIT(attr);
- ctx->NewState |= _NEW_ARRAY;
}
}
_mesa_set_varying_vp_inputs( ctx, varying_inputs );
+ ctx->Driver.UpdateState(ctx, _NEW_ARRAY);
}
diff --git a/mesalib/src/mesa/vbo/vbo_save_draw.c b/mesalib/src/mesa/vbo/vbo_save_draw.c
index 57186de64..88a9a7e34 100644
--- a/mesalib/src/mesa/vbo/vbo_save_draw.c
+++ b/mesalib/src/mesa/vbo/vbo_save_draw.c
@@ -209,11 +209,11 @@ static void vbo_bind_vertex_list(struct gl_context *ctx,
buffer_offset += node_attrsz[src] * sizeof(GLfloat);
varying_inputs |= VERT_BIT(attr);
- ctx->NewState |= _NEW_ARRAY;
}
}
_mesa_set_varying_vp_inputs( ctx, varying_inputs );
+ ctx->Driver.UpdateState(ctx, _NEW_ARRAY);
}