field | value | date |
---|---|---|
author | marha <marha@users.sourceforge.net> | 2012-04-30 10:36:15 +0200 |
committer | marha <marha@users.sourceforge.net> | 2012-04-30 10:36:15 +0200 |
commit | 762b7fde3d57d3a151f98535fd31516b7e823bc0 (patch) | |
tree | 11c32921b96808f2aab11a86863534fb28b274f6 /mesalib/src/gallium | |
parent | b68922d51f52ca6ab9daa0105ef5c57f35bfbdcf (diff) | |
download | vcxsrv-762b7fde3d57d3a151f98535fd31516b7e823bc0.tar.gz vcxsrv-762b7fde3d57d3a151f98535fd31516b7e823bc0.tar.bz2 vcxsrv-762b7fde3d57d3a151f98535fd31516b7e823bc0.zip | |
fontconfig libX11 libfontenc mesa pixman xserver git update 30 Apr 2012
Diffstat (limited to 'mesalib/src/gallium')
mode | file | lines changed |
---|---|---|
-rw-r--r-- | mesalib/src/gallium/auxiliary/util/u_blitter.c | 1 |
-rw-r--r-- | mesalib/src/gallium/auxiliary/util/u_draw_quad.c | 269 |
-rw-r--r-- | mesalib/src/gallium/auxiliary/util/u_vbuf.c | 728 |
-rw-r--r-- | mesalib/src/gallium/auxiliary/util/u_vbuf.h | 105 |
4 files changed, 513 insertions, 590 deletions
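
The bulk of this update is the u_vbuf.c rewrite shown below. It replaces the old per-element boolean arrays (`incompatible_layout_elem[]`, `incompatible_vb[]`) with 32-bit per-attrib and per-buffer masks (`incompatible_elem_mask`, `incompatible_vb_mask`, `user_vb_mask`, ...), so questions like "does any bound buffer need the translate fallback?" become a single AND, and free fallback slots are found with `ffs()` in `u_vbuf_translate_find_free_vb_slots()`. The following standalone sketch illustrates that pattern with invented names; it is not the Mesa code itself.

```c
/* A standalone sketch (invented names, not the Mesa code itself) of the
 * bitmask pattern the u_vbuf.c rewrite below adopts: per-slot boolean arrays
 * become 32-bit masks, so "does any used slot need the fallback?" is one AND
 * and "find a free slot" is one ffs(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
   uint32_t used_mask = 0;          /* bit i set: vertex-buffer slot i is referenced */
   uint32_t incompatible_mask = 0;  /* bit i set: slot i needs translation */

   used_mask |= 1u << 0;
   used_mask |= 1u << 2;
   incompatible_mask |= 1u << 2;    /* e.g. unaligned offset in slot 2 */

   /* One AND replaces a loop over PIPE_MAX_ATTRIBS booleans. */
   if (used_mask & incompatible_mask)
      printf("fallback needed, mask 0x%x\n",
             (unsigned)(used_mask & incompatible_mask));

   /* Lowest unused slot for translated output; ffs() is 1-based, 0 = none. */
   uint32_t unused_mask = ~used_mask;
   assert(unused_mask != 0);
   printf("first free slot: %d\n", ffs((int)unused_mask) - 1);
   return 0;
}
```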
diff --git a/mesalib/src/gallium/auxiliary/util/u_blitter.c b/mesalib/src/gallium/auxiliary/util/u_blitter.c
index 6a9141209..d0b9187d1 100644
--- a/mesalib/src/gallium/auxiliary/util/u_blitter.c
+++ b/mesalib/src/gallium/auxiliary/util/u_blitter.c
@@ -733,6 +733,7 @@ static void blitter_draw(struct blitter_context_priv *ctx,
    u_upload_unmap(ctx->upload);
    util_draw_vertex_buffer(ctx->base.pipe, NULL, buf, offset,
                            PIPE_PRIM_TRIANGLE_FAN, 4, 2);
+   pipe_resource_reference(&buf, NULL);
 }
 
 static void blitter_draw_rectangle(struct blitter_context *blitter,
diff --git a/mesalib/src/gallium/auxiliary/util/u_draw_quad.c b/mesalib/src/gallium/auxiliary/util/u_draw_quad.c
index 371fefb9e..590fa0c36 100644
--- a/mesalib/src/gallium/auxiliary/util/u_draw_quad.c
+++ b/mesalib/src/gallium/auxiliary/util/u_draw_quad.c
@@ -1,135 +1,134 @@
-/**************************************************************************
- *
- * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "pipe/p_context.h"
-#include "pipe/p_defines.h"
-#include "util/u_inlines.h"
-#include "util/u_draw_quad.h"
-#include "util/u_memory.h"
-#include "cso_cache/cso_context.h"
-
-
-/**
- * Draw a simple vertex buffer / primitive.
- * Limited to float[4] vertex attribs, tightly packed.
- */
-void
-util_draw_vertex_buffer(struct pipe_context *pipe,
- struct cso_context *cso,
- struct pipe_resource *vbuf,
- uint offset,
- uint prim_type,
- uint num_verts,
- uint num_attribs)
-{
- struct pipe_vertex_buffer vbuffer;
-
- assert(num_attribs <= PIPE_MAX_ATTRIBS);
-
- /* tell pipe about the vertex buffer */
- memset(&vbuffer, 0, sizeof(vbuffer));
- vbuffer.buffer = vbuf;
- vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
- vbuffer.buffer_offset = offset;
-
- if (cso) {
- cso_set_vertex_buffers(cso, 1, &vbuffer);
- } else {
- pipe->set_vertex_buffers(pipe, 1, &vbuffer);
- }
-
- /* note: vertex elements already set by caller */
-
- /* draw */
- util_draw_arrays(pipe, prim_type, 0, num_verts);
-}
-
-
-
-/**
- * Draw screen-aligned textured quad.
- * Note: this isn't especially efficient.
- */
-void
-util_draw_texquad(struct pipe_context *pipe, struct cso_context *cso,
- float x0, float y0, float x1, float y1, float z)
-{
- uint numAttribs = 2, i, j;
- uint vertexBytes = 4 * (4 * numAttribs * sizeof(float));
- struct pipe_resource *vbuf = NULL;
- float *v = NULL;
-
- v = MALLOC(vertexBytes);
- if (v == NULL)
- goto out;
-
- /*
- * Load vertex buffer
- */
- for (i = j = 0; i < 4; i++) {
- v[j + 2] = z; /* z */
- v[j + 3] = 1.0; /* w */
- v[j + 6] = 0.0; /* r */
- v[j + 7] = 1.0; /* q */
- j += 8;
- }
-
- v[0] = x0;
- v[1] = y0;
- v[4] = 0.0; /*s*/
- v[5] = 0.0; /*t*/
-
- v[8] = x1;
- v[9] = y0;
- v[12] = 1.0;
- v[13] = 0.0;
-
- v[16] = x1;
- v[17] = y1;
- v[20] = 1.0;
- v[21] = 1.0;
-
- v[24] = x0;
- v[25] = y1;
- v[28] = 0.0;
- v[29] = 1.0;
-
- vbuf = pipe_user_buffer_create(pipe->screen, v, vertexBytes,
- PIPE_BIND_VERTEX_BUFFER);
- if (!vbuf)
- goto out;
-
- util_draw_vertex_buffer(pipe, cso, vbuf, 0, PIPE_PRIM_TRIANGLE_FAN, 4, 2);
-
-out:
- if (vbuf)
- pipe_resource_reference(&vbuf, NULL);
-
- if (v)
- FREE(v);
-}
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_inlines.h"
+#include "util/u_draw_quad.h"
+#include "util/u_memory.h"
+#include "cso_cache/cso_context.h"
+
+
+/**
+ * Draw a simple vertex buffer / primitive.
+ * Limited to float[4] vertex attribs, tightly packed.
+ */
+void
+util_draw_vertex_buffer(struct pipe_context *pipe,
+                        struct cso_context *cso,
+                        struct pipe_resource *vbuf,
+                        uint offset,
+                        uint prim_type,
+                        uint num_verts,
+                        uint num_attribs)
+{
+   struct pipe_vertex_buffer vbuffer;
+
+   assert(num_attribs <= PIPE_MAX_ATTRIBS);
+
+   /* tell pipe about the vertex buffer */
+   memset(&vbuffer, 0, sizeof(vbuffer));
+   vbuffer.buffer = vbuf;
+   vbuffer.stride = num_attribs * 4 * sizeof(float);  /* vertex size */
+   vbuffer.buffer_offset = offset;
+
+   /* note: vertex elements already set by caller */
+
+   if (cso) {
+      cso_set_vertex_buffers(cso, 1, &vbuffer);
+      cso_draw_arrays(cso, prim_type, 0, num_verts);
+   } else {
+      pipe->set_vertex_buffers(pipe, 1, &vbuffer);
+      util_draw_arrays(pipe, prim_type, 0, num_verts);
+   }
+}
+
+
+
+/**
+ * Draw screen-aligned textured quad.
+ * Note: this isn't especially efficient.
+ */ +void +util_draw_texquad(struct pipe_context *pipe, struct cso_context *cso, + float x0, float y0, float x1, float y1, float z) +{ + uint numAttribs = 2, i, j; + uint vertexBytes = 4 * (4 * numAttribs * sizeof(float)); + struct pipe_resource *vbuf = NULL; + float *v = NULL; + + v = MALLOC(vertexBytes); + if (v == NULL) + goto out; + + /* + * Load vertex buffer + */ + for (i = j = 0; i < 4; i++) { + v[j + 2] = z; /* z */ + v[j + 3] = 1.0; /* w */ + v[j + 6] = 0.0; /* r */ + v[j + 7] = 1.0; /* q */ + j += 8; + } + + v[0] = x0; + v[1] = y0; + v[4] = 0.0; /*s*/ + v[5] = 0.0; /*t*/ + + v[8] = x1; + v[9] = y0; + v[12] = 1.0; + v[13] = 0.0; + + v[16] = x1; + v[17] = y1; + v[20] = 1.0; + v[21] = 1.0; + + v[24] = x0; + v[25] = y1; + v[28] = 0.0; + v[29] = 1.0; + + vbuf = pipe_user_buffer_create(pipe->screen, v, vertexBytes, + PIPE_BIND_VERTEX_BUFFER); + if (!vbuf) + goto out; + + util_draw_vertex_buffer(pipe, cso, vbuf, 0, PIPE_PRIM_TRIANGLE_FAN, 4, 2); + +out: + if (vbuf) + pipe_resource_reference(&vbuf, NULL); + + if (v) + FREE(v); +} diff --git a/mesalib/src/gallium/auxiliary/util/u_vbuf.c b/mesalib/src/gallium/auxiliary/util/u_vbuf.c index 50c35afd5..401c8a4a8 100644 --- a/mesalib/src/gallium/auxiliary/util/u_vbuf.c +++ b/mesalib/src/gallium/auxiliary/util/u_vbuf.c @@ -52,9 +52,23 @@ struct u_vbuf_elements { /* This might mean two things: * - src_format != native_format, as discussed above. * - src_offset % 4 != 0 (if the caps don't allow such an offset). */ - boolean incompatible_layout; - /* Per-element flags. */ - boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS]; + uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */ + /* Which buffer has at least one vertex element referencing it + * incompatible. */ + uint32_t incompatible_vb_mask_any; + /* Which buffer has all vertex elements referencing it incompatible. */ + uint32_t incompatible_vb_mask_all; + /* Which buffer has at least one vertex element referencing it + * compatible. */ + uint32_t compatible_vb_mask_any; + /* Which buffer has all vertex elements referencing it compatible. */ + uint32_t compatible_vb_mask_all; + + /* Which buffer has at least one vertex element referencing it + * non-instanced. */ + uint32_t noninstance_vb_mask_any; + + void *driver_cso; }; enum { @@ -64,101 +78,128 @@ enum { VB_NUM = 3 }; -struct u_vbuf_priv { - struct u_vbuf b; +struct u_vbuf { + struct u_vbuf_caps caps; + struct pipe_context *pipe; struct translate_cache *translate_cache; struct cso_cache *cso_cache; + struct u_upload_mgr *uploader; - /* Vertex element state bound by the state tracker. */ - void *saved_ve; - /* and its associated helper structure for this module. */ - struct u_vbuf_elements *ve; + /* This is what was set in set_vertex_buffers. + * May contain user buffers. */ + struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; + unsigned nr_vertex_buffers; + + /* Saved vertex buffers. */ + struct pipe_vertex_buffer vertex_buffer_saved[PIPE_MAX_ATTRIBS]; + unsigned nr_vertex_buffers_saved; + + /* Vertex buffers for the driver. + * There are no user buffers. */ + struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS]; + int nr_real_vertex_buffers; + boolean vertex_buffers_dirty; + + /* The index buffer. */ + struct pipe_index_buffer index_buffer; + + /* Vertex elements. */ + struct u_vbuf_elements *ve, *ve_saved; /* Vertex elements used for the translate fallback. 
*/ struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS]; /* If non-NULL, this is a vertex element state used for the translate * fallback and therefore used for rendering too. */ - void *fallback_ve; + boolean using_translate; /* The vertex buffer slot index where translated vertices have been * stored in. */ unsigned fallback_vbs[VB_NUM]; - /* When binding the fallback vertex element state, we don't want to - * change saved_ve and ve. This is set to TRUE in such cases. */ - boolean ve_binding_lock; - - /* Whether there is any user buffer. */ - boolean any_user_vbs; - /* Whether there is a buffer with a non-native layout. */ - boolean incompatible_vb_layout; - /* Per-buffer flags. */ - boolean incompatible_vb[PIPE_MAX_ATTRIBS]; + + /* Which buffer is a user buffer. */ + uint32_t user_vb_mask; /* each bit describes a corresp. buffer */ + /* Which buffer is incompatible (unaligned). */ + uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */ + /* Which buffer has a non-zero stride. */ + uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */ }; -static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr) -{ - struct pipe_screen *screen = mgr->pipe->screen; +static void * +u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *attribs); +static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso); - mgr->b.caps.format_fixed32 = + +void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps) +{ + caps->format_fixed32 = screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER); - mgr->b.caps.format_float16 = + caps->format_float16 = screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER); - mgr->b.caps.format_float64 = + caps->format_float64 = screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER); - mgr->b.caps.format_norm32 = + caps->format_norm32 = screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER) && screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER); - mgr->b.caps.format_scaled32 = + caps->format_scaled32 = screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER) && screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER, 0, PIPE_BIND_VERTEX_BUFFER); + + caps->buffer_offset_unaligned = + !screen->get_param(screen, + PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY); + + caps->buffer_stride_unaligned = + !screen->get_param(screen, + PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY); + + caps->velem_src_offset_unaligned = + !screen->get_param(screen, + PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY); + + caps->user_vertex_buffers = + screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS); } struct u_vbuf * u_vbuf_create(struct pipe_context *pipe, - unsigned upload_buffer_size, - unsigned upload_buffer_alignment, - unsigned upload_buffer_bind, - enum u_fetch_alignment fetch_alignment) + struct u_vbuf_caps *caps) { - struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv); + struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf); + mgr->caps = *caps; mgr->pipe = pipe; mgr->cso_cache = cso_cache_create(); mgr->translate_cache = translate_cache_create(); memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs)); - mgr->b.uploader = u_upload_create(pipe, upload_buffer_size, - 
upload_buffer_alignment, - upload_buffer_bind); - - mgr->b.caps.fetch_dword_unaligned = - fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED; - - u_vbuf_init_format_caps(mgr); + mgr->uploader = u_upload_create(pipe, 1024 * 1024, 4, + PIPE_BIND_VERTEX_BUFFER); - return &mgr->b; + return mgr; } -/* XXX I had to fork this off of cso_context. */ -static void * -u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr, - unsigned count, - const struct pipe_vertex_element *states) +/* u_vbuf uses its own caching for vertex elements, because it needs to keep + * its own preprocessed state per vertex element CSO. */ +static struct u_vbuf_elements * +u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *states) { + struct pipe_context *pipe = mgr->pipe; unsigned key_size, hash_key; struct cso_hash_iter iter; - void *handle; + struct u_vbuf_elements *ve; struct cso_velems_state velems_state; /* need to include the count into the stored state data too. */ @@ -173,47 +214,52 @@ u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr, if (cso_hash_iter_is_null(iter)) { struct cso_velements *cso = MALLOC_STRUCT(cso_velements); memcpy(&cso->state, &velems_state, key_size); - cso->data = - mgr->pipe->create_vertex_elements_state(mgr->pipe, count, - &cso->state.velems[0]); - cso->delete_state = - (cso_state_callback)mgr->pipe->delete_vertex_elements_state; - cso->context = mgr->pipe; + cso->data = u_vbuf_create_vertex_elements(mgr, count, states); + cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements; + cso->context = (void*)mgr; iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso); - handle = cso->data; + ve = cso->data; } else { - handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data; + ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data; } - mgr->pipe->bind_vertex_elements_state(mgr->pipe, handle); - return handle; + assert(ve); + pipe->bind_vertex_elements_state(pipe, ve->driver_cso); + return ve; +} + +void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *states) +{ + mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states); } -void u_vbuf_destroy(struct u_vbuf *mgrb) +void u_vbuf_destroy(struct u_vbuf *mgr) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; unsigned i; - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL); + mgr->pipe->set_vertex_buffers(mgr->pipe, 0, NULL); + + for (i = 0; i < mgr->nr_vertex_buffers; i++) { + pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL); } - for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); + for (i = 0; i < mgr->nr_real_vertex_buffers; i++) { + pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL); } translate_cache_destroy(mgr->translate_cache); - u_upload_destroy(mgr->b.uploader); + u_upload_destroy(mgr->uploader); cso_cache_delete(mgr->cso_cache); FREE(mgr); } static void -u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key, +u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key, unsigned vb_mask, unsigned out_vb, int start_vertex, unsigned num_vertices, int start_index, unsigned num_indices, int min_index, - bool unroll_indices) + boolean unroll_indices) { struct translate *tr; struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}; @@ -225,14 +271,14 @@ u_vbuf_translate_buffers(struct 
u_vbuf_priv *mgr, struct translate_key *key, tr = translate_cache_find(mgr->translate_cache, key); /* Map buffers we want to translate. */ - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { + for (i = 0; i < mgr->nr_vertex_buffers; i++) { if (vb_mask & (1 << i)) { - struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i]; + struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[i]; unsigned offset = vb->buffer_offset + vb->stride * start_vertex; uint8_t *map; - if (u_vbuf_resource(vb->buffer)->user_ptr) { - map = u_vbuf_resource(vb->buffer)->user_ptr + offset; + if (vb->buffer->user_ptr) { + map = vb->buffer->user_ptr + offset; } else { unsigned size = vb->stride ? num_vertices * vb->stride : sizeof(double)*4; @@ -256,15 +302,15 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key, /* Translate. */ if (unroll_indices) { - struct pipe_index_buffer *ib = &mgr->b.index_buffer; + struct pipe_index_buffer *ib = &mgr->index_buffer; struct pipe_transfer *transfer = NULL; unsigned offset = ib->offset + start_index * ib->index_size; uint8_t *map; assert(ib->buffer && ib->index_size); - if (u_vbuf_resource(ib->buffer)->user_ptr) { - map = u_vbuf_resource(ib->buffer)->user_ptr + offset; + if (ib->buffer->user_ptr) { + map = ib->buffer->user_ptr + offset; } else { map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset, num_indices * ib->index_size, @@ -272,7 +318,7 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key, } /* Create and map the output buffer. */ - u_upload_alloc(mgr->b.uploader, 0, + u_upload_alloc(mgr->uploader, 0, key->output_stride * num_indices, &out_offset, &out_buffer, (void**)&out_map); @@ -294,7 +340,7 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key, } } else { /* Create and map the output buffer. */ - u_upload_alloc(mgr->b.uploader, + u_upload_alloc(mgr->uploader, key->output_stride * start_vertex, key->output_stride * num_vertices, &out_offset, &out_buffer, @@ -306,64 +352,52 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key, } /* Unmap all buffers. */ - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { + for (i = 0; i < mgr->nr_vertex_buffers; i++) { if (vb_transfer[i]) { pipe_buffer_unmap(mgr->pipe, vb_transfer[i]); } } /* Setup the new vertex buffer. */ - mgr->b.real_vertex_buffer[out_vb].buffer_offset = out_offset; - mgr->b.real_vertex_buffer[out_vb].stride = key->output_stride; + mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset; + mgr->real_vertex_buffer[out_vb].stride = key->output_stride; /* Move the buffer reference. */ pipe_resource_reference( - &mgr->b.real_vertex_buffer[out_vb].buffer, NULL); - mgr->b.real_vertex_buffer[out_vb].buffer = out_buffer; + &mgr->real_vertex_buffer[out_vb].buffer, NULL); + mgr->real_vertex_buffer[out_vb].buffer = out_buffer; } static boolean -u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr, +u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr, unsigned mask[VB_NUM]) { - unsigned i, type; - unsigned nr = mgr->ve->count; - boolean used_vb[PIPE_MAX_ATTRIBS] = {0}; + unsigned type; unsigned fallback_vbs[VB_NUM]; + /* Set the bit for each buffer which is incompatible, or isn't set. */ + uint32_t unused_vb_mask = + mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask | + ~((1 << mgr->nr_vertex_buffers) - 1); memset(fallback_vbs, ~0, sizeof(fallback_vbs)); - /* Mark used vertex buffers as... used. 
*/ - for (i = 0; i < nr; i++) { - if (!mgr->ve->incompatible_layout_elem[i]) { - unsigned index = mgr->ve->ve[i].vertex_buffer_index; - - if (!mgr->incompatible_vb[index]) { - used_vb[index] = TRUE; - } - } - } - /* Find free slots for each type if needed. */ - i = 0; for (type = 0; type < VB_NUM; type++) { if (mask[type]) { - for (; i < PIPE_MAX_ATTRIBS; i++) { - if (!used_vb[i]) { - /*printf("found slot=%i for type=%i\n", i, type);*/ - fallback_vbs[type] = i; - i++; - if (i > mgr->b.nr_real_vertex_buffers) { - mgr->b.nr_real_vertex_buffers = i; - } - break; - } - } - if (i == PIPE_MAX_ATTRIBS) { + uint32_t index; + + if (!unused_vb_mask) { /* fail, reset the number to its original value */ - mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers; + mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers; return FALSE; } + + index = ffs(unused_vb_mask) - 1; + fallback_vbs[type] = index; + if (index >= mgr->nr_real_vertex_buffers) { + mgr->nr_real_vertex_buffers = index + 1; + } + /*printf("found slot=%i for type=%i\n", index, type);*/ } } @@ -372,11 +406,11 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr, } static boolean -u_vbuf_translate_begin(struct u_vbuf_priv *mgr, +u_vbuf_translate_begin(struct u_vbuf *mgr, int start_vertex, unsigned num_vertices, int start_instance, unsigned num_instances, int start_index, unsigned num_indices, int min_index, - bool unroll_indices) + boolean unroll_indices) { unsigned mask[VB_NUM] = {0}; struct translate_key key[VB_NUM]; @@ -403,22 +437,22 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr, for (i = 0; i < mgr->ve->count; i++) { unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index; - if (!mgr->b.vertex_buffer[vb_index].stride) { - if (!mgr->ve->incompatible_layout_elem[i] && - !mgr->incompatible_vb[vb_index]) { + if (!mgr->vertex_buffer[vb_index].stride) { + if (!(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(mgr->incompatible_vb_mask & (1 << vb_index))) { continue; } mask[VB_CONST] |= 1 << vb_index; } else if (mgr->ve->ve[i].instance_divisor) { - if (!mgr->ve->incompatible_layout_elem[i] && - !mgr->incompatible_vb[vb_index]) { + if (!(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(mgr->incompatible_vb_mask & (1 << vb_index))) { continue; } mask[VB_INSTANCE] |= 1 << vb_index; } else { if (!unroll_indices && - !mgr->ve->incompatible_layout_elem[i] && - !mgr->incompatible_vb[vb_index]) { + !(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(mgr->incompatible_vb_mask & (1 << vb_index))) { continue; } mask[VB_VERTEX] |= 1 << vb_index; @@ -439,8 +473,8 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr, unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index; bit = 1 << vb_index; - if (!mgr->ve->incompatible_layout_elem[i] && - !mgr->incompatible_vb[vb_index] && + if (!(mgr->ve->incompatible_elem_mask & (1 << i)) && + !(mgr->incompatible_vb_mask & (1 << vb_index)) && (!unroll_indices || !(mask[VB_VERTEX] & bit))) { continue; } @@ -484,7 +518,7 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr, /* Fixup the stride for constant attribs. */ if (type == VB_CONST) { - mgr->b.real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0; + mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0; } } } @@ -512,55 +546,48 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr, } } - /* Preserve saved_ve. 
*/ - mgr->ve_binding_lock = TRUE; - mgr->fallback_ve = u_vbuf_pipe_set_vertex_elements(mgr, mgr->ve->count, - mgr->fallback_velems); - mgr->ve_binding_lock = FALSE; + u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count, + mgr->fallback_velems); + mgr->using_translate = TRUE; return TRUE; } -static void u_vbuf_translate_end(struct u_vbuf_priv *mgr) +static void u_vbuf_translate_end(struct u_vbuf *mgr) { unsigned i; /* Restore vertex elements. */ - /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */ - mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve); - mgr->fallback_ve = NULL; + mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso); + mgr->using_translate = FALSE; /* Unreference the now-unused VBOs. */ for (i = 0; i < VB_NUM; i++) { unsigned vb = mgr->fallback_vbs[i]; if (vb != ~0) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[vb].buffer, NULL); + pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL); mgr->fallback_vbs[i] = ~0; } } - mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers; + mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers; } #define FORMAT_REPLACE(what, withwhat) \ case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break -struct u_vbuf_elements * -u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, - unsigned count, - const struct pipe_vertex_element *attribs, - struct pipe_vertex_element *native_attribs) +static void * +u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *attribs) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + struct pipe_context *pipe = mgr->pipe; unsigned i; + struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS]; struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements); + uint32_t used_buffers = 0; ve->count = count; - if (!count) { - return ve; - } - memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count); - memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count); + memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count); /* Set the best native format in case the original format is not * supported. */ @@ -569,10 +596,16 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, ve->src_format_size[i] = util_format_get_blocksize(format); + used_buffers |= 1 << ve->ve[i].vertex_buffer_index; + + if (!ve->ve[i].instance_divisor) { + ve->noninstance_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index; + } + /* Choose a native format. * For now we don't care about the alignment, that's going to * be sorted out later. 
*/ - if (!mgr->b.caps.format_fixed32) { + if (!mgr->caps.format_fixed32) { switch (format) { FORMAT_REPLACE(R32_FIXED, R32_FLOAT); FORMAT_REPLACE(R32G32_FIXED, R32G32_FLOAT); @@ -581,7 +614,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, default:; } } - if (!mgr->b.caps.format_float16) { + if (!mgr->caps.format_float16) { switch (format) { FORMAT_REPLACE(R16_FLOAT, R32_FLOAT); FORMAT_REPLACE(R16G16_FLOAT, R32G32_FLOAT); @@ -590,7 +623,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, default:; } } - if (!mgr->b.caps.format_float64) { + if (!mgr->caps.format_float64) { switch (format) { FORMAT_REPLACE(R64_FLOAT, R32_FLOAT); FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT); @@ -599,7 +632,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, default:; } } - if (!mgr->b.caps.format_norm32) { + if (!mgr->caps.format_norm32) { switch (format) { FORMAT_REPLACE(R32_UNORM, R32_FLOAT); FORMAT_REPLACE(R32G32_UNORM, R32G32_FLOAT); @@ -612,7 +645,7 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, default:; } } - if (!mgr->b.caps.format_scaled32) { + if (!mgr->caps.format_scaled32) { switch (format) { FORMAT_REPLACE(R32_USCALED, R32_FLOAT); FORMAT_REPLACE(R32G32_USCALED, R32G32_FLOAT); @@ -626,115 +659,106 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb, } } - native_attribs[i].src_format = format; + driver_attribs[i].src_format = format; ve->native_format[i] = format; ve->native_format_size[i] = util_format_get_blocksize(ve->native_format[i]); - ve->incompatible_layout_elem[i] = - ve->ve[i].src_format != ve->native_format[i] || - (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0); - ve->incompatible_layout = - ve->incompatible_layout || - ve->incompatible_layout_elem[i]; + if (ve->ve[i].src_format != format || + (!mgr->caps.velem_src_offset_unaligned && + ve->ve[i].src_offset % 4 != 0)) { + ve->incompatible_elem_mask |= 1 << i; + ve->incompatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index; + } else { + ve->compatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index; + } } + ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers; + ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers; + /* Align the formats to the size of DWORD if needed. 
*/ - if (!mgr->b.caps.fetch_dword_unaligned) { + if (!mgr->caps.velem_src_offset_unaligned) { for (i = 0; i < count; i++) { ve->native_format_size[i] = align(ve->native_format_size[i], 4); } } + ve->driver_cso = + pipe->create_vertex_elements_state(pipe, count, driver_attribs); return ve; } -void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb, - void *cso, - struct u_vbuf_elements *ve) +static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; - - if (!cso) { - return; - } - - if (!mgr->ve_binding_lock) { - mgr->saved_ve = cso; - mgr->ve = ve; - } -} + struct pipe_context *pipe = mgr->pipe; + struct u_vbuf_elements *ve = cso; -void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr, - struct u_vbuf_elements *ve) -{ + pipe->delete_vertex_elements_state(pipe, ve->driver_cso); FREE(ve); } -void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb, - unsigned count, +void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, unsigned count, const struct pipe_vertex_buffer *bufs) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; unsigned i; - mgr->any_user_vbs = FALSE; - mgr->incompatible_vb_layout = FALSE; - memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb)); - - if (!mgr->b.caps.fetch_dword_unaligned) { - /* Check if the strides and offsets are aligned to the size of DWORD. */ - for (i = 0; i < count; i++) { - if (bufs[i].buffer) { - if (bufs[i].stride % 4 != 0 || - bufs[i].buffer_offset % 4 != 0) { - mgr->incompatible_vb_layout = TRUE; - mgr->incompatible_vb[i] = TRUE; - } - } - } - } + mgr->user_vb_mask = 0; + mgr->incompatible_vb_mask = 0; + mgr->nonzero_stride_vb_mask = 0; for (i = 0; i < count; i++) { const struct pipe_vertex_buffer *vb = &bufs[i]; + struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[i]; + struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[i]; - pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer); + pipe_resource_reference(&orig_vb->buffer, vb->buffer); - mgr->b.real_vertex_buffer[i].buffer_offset = - mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset; + real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset; + real_vb->stride = orig_vb->stride = vb->stride; - mgr->b.real_vertex_buffer[i].stride = - mgr->b.vertex_buffer[i].stride = vb->stride; + if (vb->stride) { + mgr->nonzero_stride_vb_mask |= 1 << i; + } + + if (!vb->buffer) { + pipe_resource_reference(&real_vb->buffer, NULL); + continue; + } - if (!vb->buffer || - mgr->incompatible_vb[i]) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); + if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) || + (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) { + mgr->incompatible_vb_mask |= 1 << i; + pipe_resource_reference(&real_vb->buffer, NULL); continue; } - if (u_vbuf_resource(vb->buffer)->user_ptr) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); - mgr->any_user_vbs = TRUE; + if (!mgr->caps.user_vertex_buffers && vb->buffer->user_ptr) { + mgr->user_vb_mask |= 1 << i; + pipe_resource_reference(&real_vb->buffer, NULL); continue; } - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer); + pipe_resource_reference(&real_vb->buffer, vb->buffer); } - for (i = count; i < mgr->b.nr_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL); + for (i = count; i < mgr->nr_vertex_buffers; i++) { + pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL); } - for (i = count; i < 
mgr->b.nr_real_vertex_buffers; i++) { - pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL); + for (i = count; i < mgr->nr_real_vertex_buffers; i++) { + pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL); } - mgr->b.nr_vertex_buffers = count; - mgr->b.nr_real_vertex_buffers = count; + mgr->nr_vertex_buffers = count; + mgr->nr_real_vertex_buffers = count; + mgr->vertex_buffers_dirty = TRUE; } void u_vbuf_set_index_buffer(struct u_vbuf *mgr, const struct pipe_index_buffer *ib) { + struct pipe_context *pipe = mgr->pipe; + if (ib && ib->buffer) { assert(ib->offset % ib->index_size == 0); pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer); @@ -743,18 +767,20 @@ void u_vbuf_set_index_buffer(struct u_vbuf *mgr, } else { pipe_resource_reference(&mgr->index_buffer.buffer, NULL); } + + pipe->set_index_buffer(pipe, ib); } static void -u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, +u_vbuf_upload_buffers(struct u_vbuf *mgr, int start_vertex, unsigned num_vertices, int start_instance, unsigned num_instances) { unsigned i; unsigned nr_velems = mgr->ve->count; - unsigned nr_vbufs = mgr->b.nr_vertex_buffers; + unsigned nr_vbufs = mgr->nr_vertex_buffers; struct pipe_vertex_element *velems = - mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve; + mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve; unsigned start_offset[PIPE_MAX_ATTRIBS]; unsigned end_offset[PIPE_MAX_ATTRIBS] = {0}; @@ -762,7 +788,7 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, for (i = 0; i < nr_velems; i++) { struct pipe_vertex_element *velem = &velems[i]; unsigned index = velem->vertex_buffer_index; - struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index]; + struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index]; unsigned instance_div, first, size; /* Skip the buffers generated by translate. */ @@ -774,7 +800,7 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, assert(vb->buffer); - if (!u_vbuf_resource(vb->buffer)->user_ptr) { + if (!vb->buffer->user_ptr) { continue; } @@ -820,129 +846,35 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr, start = start_offset[i]; assert(start < end); - real_vb = &mgr->b.real_vertex_buffer[i]; - ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr; + real_vb = &mgr->real_vertex_buffer[i]; + ptr = mgr->vertex_buffer[i].buffer->user_ptr; - u_upload_data(mgr->b.uploader, start, end - start, ptr + start, + u_upload_data(mgr->uploader, start, end - start, ptr + start, &real_vb->buffer_offset, &real_vb->buffer); real_vb->buffer_offset -= start; } } -unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb) -{ - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; - unsigned i, nr = mgr->ve->count; - struct pipe_vertex_element *velems = - mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve; - unsigned result = ~0; - - for (i = 0; i < nr; i++) { - struct pipe_vertex_buffer *vb = - &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index]; - unsigned size, max_count, value; - - /* We're not interested in constant and per-instance attribs. */ - if (!vb->buffer || - !vb->stride || - velems[i].instance_divisor) { - continue; - } - - size = vb->buffer->width0; - - /* Subtract buffer_offset. */ - value = vb->buffer_offset; - if (value >= size) { - return 0; - } - size -= value; - - /* Subtract src_offset. */ - value = velems[i].src_offset; - if (value >= size) { - return 0; - } - size -= value; - - /* Subtract format_size. 
*/ - value = mgr->ve->native_format_size[i]; - if (value >= size) { - return 0; - } - size -= value; - - /* Compute the max count. */ - max_count = 1 + size / vb->stride; - result = MIN2(result, max_count); - } - return result; -} - -static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr) +static boolean u_vbuf_need_minmax_index(struct u_vbuf *mgr) { - unsigned i, nr = mgr->ve->count; - - for (i = 0; i < nr; i++) { - struct pipe_vertex_buffer *vb; - unsigned index; - - /* Per-instance attribs don't need min/max_index. */ - if (mgr->ve->ve[i].instance_divisor) { - continue; - } - - index = mgr->ve->ve[i].vertex_buffer_index; - vb = &mgr->b.vertex_buffer[index]; - - /* Constant attribs don't need min/max_index. */ - if (!vb->stride) { - continue; - } - - /* Per-vertex attribs need min/max_index. */ - if (u_vbuf_resource(vb->buffer)->user_ptr || - mgr->ve->incompatible_layout_elem[i] || - mgr->incompatible_vb[index]) { - return TRUE; - } - } - - return FALSE; + /* See if there are any per-vertex attribs which will be uploaded or + * translated. Use bitmasks to get the info instead of looping over vertex + * elements. */ + return ((mgr->user_vb_mask | mgr->incompatible_vb_mask | + mgr->ve->incompatible_vb_mask_any) & + mgr->ve->noninstance_vb_mask_any & mgr->nonzero_stride_vb_mask) != 0; } -static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf_priv *mgr) +static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf *mgr) { - unsigned i, nr = mgr->ve->count; - - for (i = 0; i < nr; i++) { - struct pipe_vertex_buffer *vb; - unsigned index; - - /* Per-instance attribs are not per-vertex data. */ - if (mgr->ve->ve[i].instance_divisor) { - continue; - } - - index = mgr->ve->ve[i].vertex_buffer_index; - vb = &mgr->b.vertex_buffer[index]; - - /* Constant attribs are not per-vertex data. */ - if (!vb->stride) { - continue; - } - - /* Return true for the hw buffers which don't need to be translated. */ - /* XXX we could use some kind of a is-busy query. */ - if (!u_vbuf_resource(vb->buffer)->user_ptr && - !mgr->ve->incompatible_layout_elem[i] && - !mgr->incompatible_vb[index]) { - return TRUE; - } - } - - return FALSE; + /* Return true if there are hw buffers which don't need to be translated. + * + * We could query whether each buffer is busy, but that would + * be way more costly than this. */ + return (~mgr->user_vb_mask & ~mgr->incompatible_vb_mask & + mgr->ve->compatible_vb_mask_all & mgr->ve->noninstance_vb_mask_any & + mgr->nonzero_stride_vb_mask) != 0; } static void u_vbuf_get_minmax_index(struct pipe_context *pipe, @@ -956,8 +888,8 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, unsigned i; unsigned restart_index = info->restart_index; - if (u_vbuf_resource(ib->buffer)->user_ptr) { - indices = u_vbuf_resource(ib->buffer)->user_ptr + + if (ib->buffer->user_ptr) { + indices = ib->buffer->user_ptr + ib->offset + info->start * ib->index_size; } else { indices = pipe_buffer_map_range(pipe, ib->buffer, @@ -1044,38 +976,42 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe, } } -enum u_vbuf_return_flags -u_vbuf_draw_begin(struct u_vbuf *mgrb, - struct pipe_draw_info *info) +void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + struct pipe_context *pipe = mgr->pipe; int start_vertex, min_index; unsigned num_vertices; - bool unroll_indices = false; + boolean unroll_indices = FALSE; + uint32_t user_vb_mask = mgr->user_vb_mask; + + /* Normal draw. 
No fallback and no user buffers. */ + if (!mgr->incompatible_vb_mask && + !mgr->ve->incompatible_elem_mask && + !user_vb_mask) { + /* Set vertex buffers if needed. */ + if (mgr->vertex_buffers_dirty) { + pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers, + mgr->real_vertex_buffer); + mgr->vertex_buffers_dirty = FALSE; + } - if (!mgr->incompatible_vb_layout && - !mgr->ve->incompatible_layout && - !mgr->any_user_vbs) { - return 0; + pipe->draw_vbo(pipe, info); + return; } if (info->indexed) { - int max_index; - bool index_bounds_valid = false; - - if (info->max_index != ~0) { - min_index = info->min_index; - max_index = info->max_index; - index_bounds_valid = true; - } else if (u_vbuf_need_minmax_index(mgr)) { - u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info, - &min_index, &max_index); - index_bounds_valid = true; - } + /* See if anything needs to be done for per-vertex attribs. */ + if (u_vbuf_need_minmax_index(mgr)) { + int max_index; + + if (info->max_index != ~0) { + min_index = info->min_index; + max_index = info->max_index; + } else { + u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, info, + &min_index, &max_index); + } - /* If the index bounds are valid, it means some upload or translation - * of per-vertex attribs will be performed. */ - if (index_bounds_valid) { assert(min_index <= max_index); start_vertex = min_index + info->index_bias; @@ -1090,7 +1026,9 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb, num_vertices-info->count > 32 && !u_vbuf_mapping_vertex_buffer_blocks(mgr)) { /*printf("num_vertices=%i count=%i\n", num_vertices, info->count);*/ - unroll_indices = true; + unroll_indices = TRUE; + user_vb_mask &= ~(mgr->nonzero_stride_vb_mask & + mgr->ve->noninstance_vb_mask_any); } } else { /* Nothing to do for per-vertex attribs. */ @@ -1106,17 +1044,20 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb, /* Translate vertices with non-native layouts or formats. */ if (unroll_indices || - mgr->incompatible_vb_layout || - mgr->ve->incompatible_layout) { + mgr->incompatible_vb_mask || + mgr->ve->incompatible_elem_mask) { /* XXX check the return value */ u_vbuf_translate_begin(mgr, start_vertex, num_vertices, info->start_instance, info->instance_count, info->start, info->count, min_index, unroll_indices); + + user_vb_mask &= ~(mgr->incompatible_vb_mask | + mgr->ve->incompatible_vb_mask_all); } /* Upload user buffers. 
*/ - if (mgr->any_user_vbs) { + if (user_vb_mask) { u_vbuf_upload_buffers(mgr, start_vertex, num_vertices, info->start_instance, info->instance_count); } @@ -1130,34 +1071,75 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb, } unsigned i; - for (i = 0; i < mgr->b.nr_vertex_buffers; i++) { + for (i = 0; i < mgr->nr_vertex_buffers; i++) { printf("input %i: ", i); - util_dump_vertex_buffer(stdout, mgr->b.vertex_buffer+i); + util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i); printf("\n"); } - for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) { + for (i = 0; i < mgr->nr_real_vertex_buffers; i++) { printf("real %i: ", i); - util_dump_vertex_buffer(stdout, mgr->b.real_vertex_buffer+i); + util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i); printf("\n"); } */ - if (unroll_indices) { - info->indexed = FALSE; - info->index_bias = 0; - info->min_index = 0; - info->max_index = info->count - 1; - info->start = 0; + u_upload_unmap(mgr->uploader); + pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers, + mgr->real_vertex_buffer); + + if (unlikely(unroll_indices)) { + struct pipe_draw_info new_info = *info; + new_info.indexed = FALSE; + new_info.index_bias = 0; + new_info.min_index = 0; + new_info.max_index = info->count - 1; + new_info.start = 0; + + pipe->draw_vbo(pipe, &new_info); + } else { + pipe->draw_vbo(pipe, info); + } + + if (mgr->using_translate) { + u_vbuf_translate_end(mgr); } + mgr->vertex_buffers_dirty = TRUE; +} - return U_VBUF_BUFFERS_UPDATED; +void u_vbuf_save_vertex_elements(struct u_vbuf *mgr) +{ + assert(!mgr->ve_saved); + mgr->ve_saved = mgr->ve; } -void u_vbuf_draw_end(struct u_vbuf *mgrb) +void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr) { - struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb; + if (mgr->ve != mgr->ve_saved) { + struct pipe_context *pipe = mgr->pipe; - if (mgr->fallback_ve) { - u_vbuf_translate_end(mgr); + mgr->ve = mgr->ve_saved; + pipe->bind_vertex_elements_state(pipe, + mgr->ve ? mgr->ve->driver_cso : NULL); + } + mgr->ve_saved = NULL; +} + +void u_vbuf_save_vertex_buffers(struct u_vbuf *mgr) +{ + util_copy_vertex_buffers(mgr->vertex_buffer_saved, + &mgr->nr_vertex_buffers_saved, + mgr->vertex_buffer, + mgr->nr_vertex_buffers); +} + +void u_vbuf_restore_vertex_buffers(struct u_vbuf *mgr) +{ + unsigned i; + + u_vbuf_set_vertex_buffers(mgr, mgr->nr_vertex_buffers_saved, + mgr->vertex_buffer_saved); + for (i = 0; i < mgr->nr_vertex_buffers_saved; i++) { + pipe_resource_reference(&mgr->vertex_buffer_saved[i].buffer, NULL); } + mgr->nr_vertex_buffers_saved = 0; } diff --git a/mesalib/src/gallium/auxiliary/util/u_vbuf.h b/mesalib/src/gallium/auxiliary/util/u_vbuf.h index 3669c9b87..59eb59a11 100644 --- a/mesalib/src/gallium/auxiliary/util/u_vbuf.h +++ b/mesalib/src/gallium/auxiliary/util/u_vbuf.h @@ -25,8 +25,8 @@ * **************************************************************************/ -#ifndef U_VBUF_MGR_H -#define U_VBUF_MGR_H +#ifndef U_VBUF_H +#define U_VBUF_H /* This module builds upon u_upload_mgr and translate_cache and takes care of * user buffer uploads and vertex format fallbacks. It's designed @@ -35,7 +35,9 @@ #include "pipe/p_context.h" #include "pipe/p_state.h" -#include "util/u_transfer.h" + +struct cso_context; +struct u_vbuf; /* Hardware vertex fetcher limitations can be described by this structure. 
*/ struct u_vbuf_caps { @@ -47,99 +49,38 @@ struct u_vbuf_caps { unsigned format_norm32:1; /* PIPE_FORMAT_*32*NORM */ unsigned format_scaled32:1; /* PIPE_FORMAT_*32*SCALED */ - /* Whether vertex fetches don't have to be dword-aligned. */ + /* Whether vertex fetches don't have to be 4-byte-aligned. */ /* TRUE if hardware supports it. */ - unsigned fetch_dword_unaligned:1; -}; - -/* The manager. - * This structure should also be used to access vertex buffers - * from a driver. */ -struct u_vbuf { - /* This is what was set in set_vertex_buffers. - * May contain user buffers. */ - struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; - unsigned nr_vertex_buffers; - - /* Contains only real vertex buffers. - * Hardware drivers should use real_vertex_buffers[i] - * instead of vertex_buffers[i].buffer. */ - struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS]; - int nr_real_vertex_buffers; - - /* The index buffer. */ - struct pipe_index_buffer index_buffer; - - /* This uploader can optionally be used by the driver. - * - * Allowed functions: - * - u_upload_alloc - * - u_upload_data - * - u_upload_buffer - * - u_upload_flush */ - struct u_upload_mgr *uploader; - - struct u_vbuf_caps caps; -}; - -struct u_vbuf_resource { - struct u_resource b; - uint8_t *user_ptr; -}; + unsigned buffer_offset_unaligned:1; + unsigned buffer_stride_unaligned:1; + unsigned velem_src_offset_unaligned:1; -/* Opaque type containing information about vertex elements for the manager. */ -struct u_vbuf_elements; - -enum u_fetch_alignment { - U_VERTEX_FETCH_BYTE_ALIGNED, - U_VERTEX_FETCH_DWORD_ALIGNED + /* Whether the driver supports user vertex buffers. */ + unsigned user_vertex_buffers:1; }; -enum u_vbuf_return_flags { - U_VBUF_BUFFERS_UPDATED = 1 -}; +void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps); struct u_vbuf * u_vbuf_create(struct pipe_context *pipe, - unsigned upload_buffer_size, - unsigned upload_buffer_alignment, - unsigned upload_buffer_bind, - enum u_fetch_alignment fetch_alignment); + struct u_vbuf_caps *caps); void u_vbuf_destroy(struct u_vbuf *mgr); -struct u_vbuf_elements * -u_vbuf_create_vertex_elements(struct u_vbuf *mgr, - unsigned count, - const struct pipe_vertex_element *attrs, - struct pipe_vertex_element *native_attrs); - -void u_vbuf_bind_vertex_elements(struct u_vbuf *mgr, - void *cso, - struct u_vbuf_elements *ve); - -void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr, - struct u_vbuf_elements *ve); - -void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, - unsigned count, +/* State and draw functions. */ +void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count, + const struct pipe_vertex_element *states); +void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, unsigned count, const struct pipe_vertex_buffer *bufs); - void u_vbuf_set_index_buffer(struct u_vbuf *mgr, const struct pipe_index_buffer *ib); +void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info); -enum u_vbuf_return_flags u_vbuf_draw_begin(struct u_vbuf *mgr, - struct pipe_draw_info *info); - -unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgr); - -void u_vbuf_draw_end(struct u_vbuf *mgr); - - -static INLINE struct u_vbuf_resource *u_vbuf_resource(struct pipe_resource *r) -{ - return (struct u_vbuf_resource*)r; -} +/* Save/restore functionality. 
*/ +void u_vbuf_save_vertex_elements(struct u_vbuf *mgr); +void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr); +void u_vbuf_save_vertex_buffers(struct u_vbuf *mgr); +void u_vbuf_restore_vertex_buffers(struct u_vbuf *mgr); #endif |
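
With this change, u_vbuf.h exposes the manager as an opaque object driven entirely through function calls (`u_vbuf_get_caps`, `u_vbuf_create`, `u_vbuf_set_vertex_buffers`, `u_vbuf_draw_vbo`, ...) instead of letting callers read struct u_vbuf fields directly. The sketch below shows how a layer sitting above a `pipe_context` (the role cso_context plays in Mesa) might wire these calls up; the `my_*` types and functions are invented for illustration, and only the `u_vbuf_*` names come from the header in this diff.

```c
/* Hypothetical wiring of the reworked u_vbuf interface, in a helper layer on
 * top of a pipe_context.  u_vbuf_* come from u_vbuf.h above; my_* is made up. */
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_vbuf.h"

struct my_draw_layer {
   struct pipe_context *pipe;
   struct u_vbuf *vbuf;          /* vertex-buffer manager for this context */
};

static void my_layer_init(struct my_draw_layer *layer, struct pipe_context *pipe)
{
   struct u_vbuf_caps caps;

   /* Ask once per screen which formats/alignments the hardware can fetch. */
   u_vbuf_get_caps(pipe->screen, &caps);
   layer->pipe = pipe;
   layer->vbuf = u_vbuf_create(pipe, &caps);
}

static void my_layer_set_vertex_buffers(struct my_draw_layer *layer, unsigned count,
                                        const struct pipe_vertex_buffer *bufs)
{
   /* State goes through the manager, which keeps the original (possibly user)
    * buffers and hands the driver only real ones. */
   u_vbuf_set_vertex_buffers(layer->vbuf, count, bufs);
}

static void my_layer_draw(struct my_draw_layer *layer,
                          const struct pipe_draw_info *info)
{
   /* Uploads user buffers / translates formats as needed, then calls
    * pipe->draw_vbo on the wrapped context with the fixed-up state. */
   u_vbuf_draw_vbo(layer->vbuf, info);
}

static void my_layer_destroy(struct my_draw_layer *layer)
{
   u_vbuf_destroy(layer->vbuf);
}
```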