| author    | marha <marha@users.sourceforge.net> | 2012-04-30 10:36:15 +0200 |
| committer | marha <marha@users.sourceforge.net> | 2012-04-30 10:36:15 +0200 |
| commit    | 762b7fde3d57d3a151f98535fd31516b7e823bc0 (patch) |
| tree      | 11c32921b96808f2aab11a86863534fb28b274f6 /mesalib/src/gallium/auxiliary/util/u_vbuf.c |
| parent    | b68922d51f52ca6ab9daa0105ef5c57f35bfbdcf (diff) |
fontconfig libX11 libfontenc mesa pixman xserver git update 30 Apr 2012
Diffstat (limited to 'mesalib/src/gallium/auxiliary/util/u_vbuf.c')
-rw-r--r-- | mesalib/src/gallium/auxiliary/util/u_vbuf.c | 728
1 file changed, 355 insertions(+), 373 deletions(-)
diff --git a/mesalib/src/gallium/auxiliary/util/u_vbuf.c b/mesalib/src/gallium/auxiliary/util/u_vbuf.c
index 50c35afd5..401c8a4a8 100644
--- a/mesalib/src/gallium/auxiliary/util/u_vbuf.c
+++ b/mesalib/src/gallium/auxiliary/util/u_vbuf.c
@@ -52,9 +52,23 @@ struct u_vbuf_elements {
    /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
-   boolean incompatible_layout;
-   /* Per-element flags. */
-   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
+   uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */
+   /* Which buffer has at least one vertex element referencing it
+    * incompatible. */
+   uint32_t incompatible_vb_mask_any;
+   /* Which buffer has all vertex elements referencing it incompatible. */
+   uint32_t incompatible_vb_mask_all;
+   /* Which buffer has at least one vertex element referencing it
+    * compatible. */
+   uint32_t compatible_vb_mask_any;
+   /* Which buffer has all vertex elements referencing it compatible. */
+   uint32_t compatible_vb_mask_all;
+
+   /* Which buffer has at least one vertex element referencing it
+    * non-instanced. */
+   uint32_t noninstance_vb_mask_any;
+
+   void *driver_cso;
 };
 
 enum {
@@ -64,101 +78,128 @@ enum {
    VB_NUM = 3
 };
 
-struct u_vbuf_priv {
-   struct u_vbuf b;
+struct u_vbuf {
+   struct u_vbuf_caps caps;
+
    struct pipe_context *pipe;
    struct translate_cache *translate_cache;
    struct cso_cache *cso_cache;
+   struct u_upload_mgr *uploader;
 
-   /* Vertex element state bound by the state tracker. */
-   void *saved_ve;
-   /* and its associated helper structure for this module. */
-   struct u_vbuf_elements *ve;
+   /* This is what was set in set_vertex_buffers.
+    * May contain user buffers. */
+   struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+   unsigned nr_vertex_buffers;
+
+   /* Saved vertex buffers. */
+   struct pipe_vertex_buffer vertex_buffer_saved[PIPE_MAX_ATTRIBS];
+   unsigned nr_vertex_buffers_saved;
+
+   /* Vertex buffers for the driver.
+    * There are no user buffers. */
+   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
+   int nr_real_vertex_buffers;
+   boolean vertex_buffers_dirty;
+
+   /* The index buffer. */
+   struct pipe_index_buffer index_buffer;
+
+   /* Vertex elements. */
+   struct u_vbuf_elements *ve, *ve_saved;
 
    /* Vertex elements used for the translate fallback. */
    struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
    /* If non-NULL, this is a vertex element state used for the translate
    * fallback and therefore used for rendering too. */
-   void *fallback_ve;
+   boolean using_translate;
    /* The vertex buffer slot index where translated vertices have been
    * stored in. */
    unsigned fallback_vbs[VB_NUM];
 
-   /* When binding the fallback vertex element state, we don't want to
-    * change saved_ve and ve. This is set to TRUE in such cases. */
-   boolean ve_binding_lock;
-
-   /* Whether there is any user buffer. */
-   boolean any_user_vbs;
-   /* Whether there is a buffer with a non-native layout. */
-   boolean incompatible_vb_layout;
-   /* Per-buffer flags. */
-   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
+
+   /* Which buffer is a user buffer. */
+   uint32_t user_vb_mask; /* each bit describes a corresp. buffer */
+   /* Which buffer is incompatible (unaligned). */
+   uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */
+   /* Which buffer has a non-zero stride. */
+   uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */
 };
 
-static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
-{
-   struct pipe_screen *screen = mgr->pipe->screen;
+static void *
+u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
+                              const struct pipe_vertex_element *attribs);
+static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso);
 
-   mgr->b.caps.format_fixed32 =
+
+void u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps)
+{
+   caps->format_fixed32 =
       screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                   0, PIPE_BIND_VERTEX_BUFFER);
 
-   mgr->b.caps.format_float16 =
+   caps->format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
 
-   mgr->b.caps.format_float64 =
+   caps->format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
 
-   mgr->b.caps.format_norm32 =
+   caps->format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
     screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
 
-   mgr->b.caps.format_scaled32 =
+   caps->format_scaled32 =
     screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
     screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
+
+   caps->buffer_offset_unaligned =
+      !screen->get_param(screen,
+                         PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY);
+
+   caps->buffer_stride_unaligned =
+      !screen->get_param(screen,
+                         PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY);
+
+   caps->velem_src_offset_unaligned =
+      !screen->get_param(screen,
+                         PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY);
+
+   caps->user_vertex_buffers =
+      screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS);
 }
 
 struct u_vbuf *
 u_vbuf_create(struct pipe_context *pipe,
-              unsigned upload_buffer_size,
-              unsigned upload_buffer_alignment,
-              unsigned upload_buffer_bind,
-              enum u_fetch_alignment fetch_alignment)
+              struct u_vbuf_caps *caps)
 {
-   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);
+   struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);
 
+   mgr->caps = *caps;
    mgr->pipe = pipe;
    mgr->cso_cache = cso_cache_create();
    mgr->translate_cache = translate_cache_create();
    memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));
 
-   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
-                                     upload_buffer_alignment,
-                                     upload_buffer_bind);
-
-   mgr->b.caps.fetch_dword_unaligned =
-         fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;
-
-   u_vbuf_init_format_caps(mgr);
+   mgr->uploader = u_upload_create(pipe, 1024 * 1024, 4,
+                                   PIPE_BIND_VERTEX_BUFFER);
 
-   return &mgr->b;
+   return mgr;
 }
 
-/* XXX I had to fork this off of cso_context. */
-static void *
-u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr,
-                                unsigned count,
-                                const struct pipe_vertex_element *states)
+/* u_vbuf uses its own caching for vertex elements, because it needs to keep
+ * its own preprocessed state per vertex element CSO. */
+static struct u_vbuf_elements *
+u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count,
+                                    const struct pipe_vertex_element *states)
 {
+   struct pipe_context *pipe = mgr->pipe;
    unsigned key_size, hash_key;
    struct cso_hash_iter iter;
-   void *handle;
+   struct u_vbuf_elements *ve;
    struct cso_velems_state velems_state;
 
    /* need to include the count into the stored state data too. */
@@ -173,47 +214,52 @@ u_vbuf_pipe_set_vertex_elements(struct u_vbuf_priv *mgr,
 
    if (cso_hash_iter_is_null(iter)) {
       struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
       memcpy(&cso->state, &velems_state, key_size);
-      cso->data =
-         mgr->pipe->create_vertex_elements_state(mgr->pipe, count,
-                                                 &cso->state.velems[0]);
-      cso->delete_state =
-         (cso_state_callback)mgr->pipe->delete_vertex_elements_state;
-      cso->context = mgr->pipe;
+      cso->data = u_vbuf_create_vertex_elements(mgr, count, states);
+      cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
+      cso->context = (void*)mgr;
 
       iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
-      handle = cso->data;
+      ve = cso->data;
    } else {
-      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
+      ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
    }
 
-   mgr->pipe->bind_vertex_elements_state(mgr->pipe, handle);
-   return handle;
+   assert(ve);
+   pipe->bind_vertex_elements_state(pipe, ve->driver_cso);
+   return ve;
+}
+
+void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
+                                const struct pipe_vertex_element *states)
+{
+   mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states);
 }
 
-void u_vbuf_destroy(struct u_vbuf *mgrb)
+void u_vbuf_destroy(struct u_vbuf *mgr)
 {
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
    unsigned i;
 
-   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
-      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
+   mgr->pipe->set_vertex_buffers(mgr->pipe, 0, NULL);
+
+   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
+      pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
    }
-   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
-      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
+      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
    }
 
    translate_cache_destroy(mgr->translate_cache);
-   u_upload_destroy(mgr->b.uploader);
+   u_upload_destroy(mgr->uploader);
    cso_cache_delete(mgr->cso_cache);
    FREE(mgr);
 }
 
 static void
-u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
+u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
                          unsigned vb_mask, unsigned out_vb,
                          int start_vertex, unsigned num_vertices,
                          int start_index, unsigned num_indices, int min_index,
-                         bool unroll_indices)
+                         boolean unroll_indices)
 {
    struct translate *tr;
    struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
@@ -225,14 +271,14 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
    tr = translate_cache_find(mgr->translate_cache, key);
 
    /* Map buffers we want to translate. */
-   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
+   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      if (vb_mask & (1 << i)) {
-         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];
+         struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[i];
         unsigned offset = vb->buffer_offset + vb->stride * start_vertex;
         uint8_t *map;
 
-         if (u_vbuf_resource(vb->buffer)->user_ptr) {
-            map = u_vbuf_resource(vb->buffer)->user_ptr + offset;
+         if (vb->buffer->user_ptr) {
+            map = vb->buffer->user_ptr + offset;
         } else {
            unsigned size = vb->stride ? num_vertices * vb->stride
                                       : sizeof(double)*4;
@@ -256,15 +302,15 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
 
    /* Translate. */
    if (unroll_indices) {
-      struct pipe_index_buffer *ib = &mgr->b.index_buffer;
+      struct pipe_index_buffer *ib = &mgr->index_buffer;
      struct pipe_transfer *transfer = NULL;
      unsigned offset = ib->offset + start_index * ib->index_size;
      uint8_t *map;
 
      assert(ib->buffer && ib->index_size);
 
-      if (u_vbuf_resource(ib->buffer)->user_ptr) {
-         map = u_vbuf_resource(ib->buffer)->user_ptr + offset;
+      if (ib->buffer->user_ptr) {
+         map = ib->buffer->user_ptr + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
                                     num_indices * ib->index_size,
@@ -272,7 +318,7 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
      }
 
      /* Create and map the output buffer. */
-      u_upload_alloc(mgr->b.uploader, 0,
+      u_upload_alloc(mgr->uploader, 0,
                     key->output_stride * num_indices,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
@@ -294,7 +340,7 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
      }
    } else {
      /* Create and map the output buffer. */
-      u_upload_alloc(mgr->b.uploader,
+      u_upload_alloc(mgr->uploader,
                     key->output_stride * start_vertex,
                     key->output_stride * num_vertices,
                     &out_offset, &out_buffer,
@@ -306,64 +352,52 @@ u_vbuf_translate_buffers(struct u_vbuf_priv *mgr, struct translate_key *key,
    }
 
    /* Unmap all buffers. */
-   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
+   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      if (vb_transfer[i]) {
        pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
     }
   }
 
   /* Setup the new vertex buffer. */
-   mgr->b.real_vertex_buffer[out_vb].buffer_offset = out_offset;
-   mgr->b.real_vertex_buffer[out_vb].stride = key->output_stride;
+   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
+   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;
 
   /* Move the buffer reference. */
   pipe_resource_reference(
-      &mgr->b.real_vertex_buffer[out_vb].buffer, NULL);
-   mgr->b.real_vertex_buffer[out_vb].buffer = out_buffer;
+      &mgr->real_vertex_buffer[out_vb].buffer, NULL);
+   mgr->real_vertex_buffer[out_vb].buffer = out_buffer;
 }
 
 static boolean
-u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
+u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
                                     unsigned mask[VB_NUM])
 {
-   unsigned i, type;
-   unsigned nr = mgr->ve->count;
-   boolean used_vb[PIPE_MAX_ATTRIBS] = {0};
+   unsigned type;
    unsigned fallback_vbs[VB_NUM];
+   /* Set the bit for each buffer which is incompatible, or isn't set. */
+   uint32_t unused_vb_mask =
+      mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask |
+      ~((1 << mgr->nr_vertex_buffers) - 1);
 
    memset(fallback_vbs, ~0, sizeof(fallback_vbs));
 
-   /* Mark used vertex buffers as... used. */
-   for (i = 0; i < nr; i++) {
-      if (!mgr->ve->incompatible_layout_elem[i]) {
-         unsigned index = mgr->ve->ve[i].vertex_buffer_index;
-
-         if (!mgr->incompatible_vb[index]) {
-            used_vb[index] = TRUE;
-         }
-      }
-   }
-
    /* Find free slots for each type if needed. */
-   i = 0;
    for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
-         for (; i < PIPE_MAX_ATTRIBS; i++) {
-            if (!used_vb[i]) {
-               /*printf("found slot=%i for type=%i\n", i, type);*/
-               fallback_vbs[type] = i;
-               i++;
-               if (i > mgr->b.nr_real_vertex_buffers) {
-                  mgr->b.nr_real_vertex_buffers = i;
-               }
-               break;
-            }
-         }
-         if (i == PIPE_MAX_ATTRIBS) {
+         uint32_t index;
+
+         if (!unused_vb_mask) {
            /* fail, reset the number to its original value */
-            mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
+            mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers;
            return FALSE;
         }
+
+         index = ffs(unused_vb_mask) - 1;
+         fallback_vbs[type] = index;
+         if (index >= mgr->nr_real_vertex_buffers) {
+            mgr->nr_real_vertex_buffers = index + 1;
+         }
+         /*printf("found slot=%i for type=%i\n", index, type);*/
     }
   }
 
@@ -372,11 +406,11 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf_priv *mgr,
 }
 
 static boolean
-u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
+u_vbuf_translate_begin(struct u_vbuf *mgr,
                        int start_vertex, unsigned num_vertices,
                        int start_instance, unsigned num_instances,
                        int start_index, unsigned num_indices, int min_index,
-                       bool unroll_indices)
+                       boolean unroll_indices)
 {
    unsigned mask[VB_NUM] = {0};
    struct translate_key key[VB_NUM];
@@ -403,22 +437,22 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
    for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;
 
-      if (!mgr->b.vertex_buffer[vb_index].stride) {
-         if (!mgr->ve->incompatible_layout_elem[i] &&
-             !mgr->incompatible_vb[vb_index]) {
+      if (!mgr->vertex_buffer[vb_index].stride) {
+         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+             !(mgr->incompatible_vb_mask & (1 << vb_index))) {
           continue;
        }
        mask[VB_CONST] |= 1 << vb_index;
     } else if (mgr->ve->ve[i].instance_divisor) {
-         if (!mgr->ve->incompatible_layout_elem[i] &&
-             !mgr->incompatible_vb[vb_index]) {
+         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+             !(mgr->incompatible_vb_mask & (1 << vb_index))) {
           continue;
        }
        mask[VB_INSTANCE] |= 1 << vb_index;
     } else {
        if (!unroll_indices &&
-             !mgr->ve->incompatible_layout_elem[i] &&
-             !mgr->incompatible_vb[vb_index]) {
+             !(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+             !(mgr->incompatible_vb_mask & (1 << vb_index))) {
          continue;
        }
        mask[VB_VERTEX] |= 1 << vb_index;
@@ -439,8 +473,8 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
     unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
     bit = 1 << vb_index;
 
-      if (!mgr->ve->incompatible_layout_elem[i] &&
-          !mgr->incompatible_vb[vb_index] &&
+      if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
+          !(mgr->incompatible_vb_mask & (1 << vb_index)) &&
        (!unroll_indices || !(mask[VB_VERTEX] & bit))) {
       continue;
     }
@@ -484,7 +518,7 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
 
        /* Fixup the stride for constant attribs. */
        if (type == VB_CONST) {
-            mgr->b.real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
+            mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
       }
     }
   }
@@ -512,55 +546,48 @@ u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
     }
   }
 
-   /* Preserve saved_ve. */
-   mgr->ve_binding_lock = TRUE;
-   mgr->fallback_ve = u_vbuf_pipe_set_vertex_elements(mgr, mgr->ve->count,
-                                                      mgr->fallback_velems);
-   mgr->ve_binding_lock = FALSE;
+   u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count,
+                                       mgr->fallback_velems);
+   mgr->using_translate = TRUE;
   return TRUE;
 }
 
-static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
+static void u_vbuf_translate_end(struct u_vbuf *mgr)
 {
   unsigned i;
 
   /* Restore vertex elements. */
-   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
-   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
-   mgr->fallback_ve = NULL;
+   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
+   mgr->using_translate = FALSE;
 
   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
     unsigned vb = mgr->fallback_vbs[i];
     if (vb != ~0) {
-         pipe_resource_reference(&mgr->b.real_vertex_buffer[vb].buffer, NULL);
+         pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL);
       mgr->fallback_vbs[i] = ~0;
     }
   }
 
-   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
+   mgr->nr_real_vertex_buffers = mgr->nr_vertex_buffers;
 }
 
 #define FORMAT_REPLACE(what, withwhat) \
     case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break
 
-struct u_vbuf_elements *
-u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
-                              unsigned count,
-                              const struct pipe_vertex_element *attribs,
-                              struct pipe_vertex_element *native_attribs)
+static void *
+u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
+                              const struct pipe_vertex_element *attribs)
 {
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
+   struct pipe_context *pipe = mgr->pipe;
+   struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS];
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);
+   uint32_t used_buffers = 0;
 
   ve->count = count;
 
-   if (!count) {
-      return ve;
-   }
-
   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
-   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);
+   memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count);
 
   /* Set the best native format in case the original format is not
    * supported. */
@@ -569,10 +596,16 @@ u_vbuf_create_vertex_elements(struct u_vbuf *mgrb,
 
     ve->src_format_size[i] = util_format_get_blocksize(format);
 
+      used_buffers |= 1 << ve->ve[i].vertex_buffer_index;
+
+      if (!ve->ve[i].instance_divisor) {
+         ve->noninstance_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
+      }
+
     /* Choose a native format.
      * For now we don't care about the alignment, that's going to
      * be sorted out later. */
-      if (!mgr->b.caps.format_fixed32) {
+      if (!mgr->caps.format_fixed32) {
       switch (format) {
          FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
@@ -581,7 +614,7 @@
         default:;
       }
     }
-      if (!mgr->b.caps.format_float16) {
+      if (!mgr->caps.format_float16) {
       switch (format) {
         FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
@@ -590,7 +623,7 @@
         default:;
       }
     }
-      if (!mgr->b.caps.format_float64) {
+      if (!mgr->caps.format_float64) {
       switch (format) {
         FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
@@ -599,7 +632,7 @@
         default:;
       }
     }
-      if (!mgr->b.caps.format_norm32) {
+      if (!mgr->caps.format_norm32) {
       switch (format) {
         FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
@@ -612,7 +645,7 @@
         default:;
       }
     }
-      if (!mgr->b.caps.format_scaled32) {
+      if (!mgr->caps.format_scaled32) {
       switch (format) {
         FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
@@ -626,115 +659,106 @@
       }
     }
 
-      native_attribs[i].src_format = format;
+      driver_attribs[i].src_format = format;
     ve->native_format[i] = format;
     ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);
 
-      ve->incompatible_layout_elem[i] =
-         ve->ve[i].src_format != ve->native_format[i] ||
-         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
-      ve->incompatible_layout =
-         ve->incompatible_layout ||
-         ve->incompatible_layout_elem[i];
+      if (ve->ve[i].src_format != format ||
+          (!mgr->caps.velem_src_offset_unaligned &&
+           ve->ve[i].src_offset % 4 != 0)) {
+         ve->incompatible_elem_mask |= 1 << i;
+         ve->incompatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
+      } else {
+         ve->compatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
+      }
   }
 
+   ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers;
+   ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers;
+
   /* Align the formats to the size of DWORD if needed. */
-   if (!mgr->b.caps.fetch_dword_unaligned) {
+   if (!mgr->caps.velem_src_offset_unaligned) {
     for (i = 0; i < count; i++) {
       ve->native_format_size[i] = align(ve->native_format_size[i], 4);
     }
   }
 
+   ve->driver_cso =
+      pipe->create_vertex_elements_state(pipe, count, driver_attribs);
   return ve;
 }
 
-void u_vbuf_bind_vertex_elements(struct u_vbuf *mgrb,
-                                 void *cso,
-                                 struct u_vbuf_elements *ve)
+static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso)
 {
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
-
-   if (!cso) {
-      return;
-   }
-
-   if (!mgr->ve_binding_lock) {
-      mgr->saved_ve = cso;
-      mgr->ve = ve;
-   }
-}
+   struct pipe_context *pipe = mgr->pipe;
+   struct u_vbuf_elements *ve = cso;
 
-void u_vbuf_destroy_vertex_elements(struct u_vbuf *mgr,
-                                    struct u_vbuf_elements *ve)
-{
+   pipe->delete_vertex_elements_state(pipe, ve->driver_cso);
   FREE(ve);
 }
 
-void u_vbuf_set_vertex_buffers(struct u_vbuf *mgrb,
-                               unsigned count,
+void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, unsigned count,
                                const struct pipe_vertex_buffer *bufs)
 {
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
 
-   mgr->any_user_vbs = FALSE;
-   mgr->incompatible_vb_layout = FALSE;
-   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));
-
-   if (!mgr->b.caps.fetch_dword_unaligned) {
-      /* Check if the strides and offsets are aligned to the size of DWORD. */
-      for (i = 0; i < count; i++) {
-         if (bufs[i].buffer) {
-            if (bufs[i].stride % 4 != 0 ||
-                bufs[i].buffer_offset % 4 != 0) {
-               mgr->incompatible_vb_layout = TRUE;
-               mgr->incompatible_vb[i] = TRUE;
-            }
-         }
-      }
-   }
+   mgr->user_vb_mask = 0;
+   mgr->incompatible_vb_mask = 0;
+   mgr->nonzero_stride_vb_mask = 0;
 
   for (i = 0; i < count; i++) {
     const struct pipe_vertex_buffer *vb = &bufs[i];
+      struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[i];
+      struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[i];
 
-      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
+      pipe_resource_reference(&orig_vb->buffer, vb->buffer);
 
-      mgr->b.real_vertex_buffer[i].buffer_offset =
-         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;
+      real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset;
+      real_vb->stride = orig_vb->stride = vb->stride;
 
-      mgr->b.real_vertex_buffer[i].stride =
-         mgr->b.vertex_buffer[i].stride = vb->stride;
+      if (vb->stride) {
+         mgr->nonzero_stride_vb_mask |= 1 << i;
+      }
+
+      if (!vb->buffer) {
+         pipe_resource_reference(&real_vb->buffer, NULL);
+         continue;
+      }
 
-      if (!vb->buffer ||
-          mgr->incompatible_vb[i]) {
-         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+      if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
+          (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
+         mgr->incompatible_vb_mask |= 1 << i;
+         pipe_resource_reference(&real_vb->buffer, NULL);
       continue;
     }
 
-      if (u_vbuf_resource(vb->buffer)->user_ptr) {
-         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
-         mgr->any_user_vbs = TRUE;
+      if (!mgr->caps.user_vertex_buffers && vb->buffer->user_ptr) {
+         mgr->user_vb_mask |= 1 << i;
+         pipe_resource_reference(&real_vb->buffer, NULL);
       continue;
     }
 
-      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
+      pipe_resource_reference(&real_vb->buffer, vb->buffer);
   }
 
-   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
-      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
+   for (i = count; i < mgr->nr_vertex_buffers; i++) {
+      pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
   }
-   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
-      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
+   for (i = count; i < mgr->nr_real_vertex_buffers; i++) {
+      pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
   }
 
-   mgr->b.nr_vertex_buffers = count;
-   mgr->b.nr_real_vertex_buffers = count;
+   mgr->nr_vertex_buffers = count;
+   mgr->nr_real_vertex_buffers = count;
+   mgr->vertex_buffers_dirty = TRUE;
 }
 
 void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                              const struct pipe_index_buffer *ib)
 {
+   struct pipe_context *pipe = mgr->pipe;
+
   if (ib && ib->buffer) {
     assert(ib->offset % ib->index_size == 0);
     pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
@@ -743,18 +767,20 @@ void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
   } else {
     pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }
+
+   pipe->set_index_buffer(pipe, ib);
 }
 
 static void
-u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
+u_vbuf_upload_buffers(struct u_vbuf *mgr,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances)
 {
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
-   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
+   unsigned nr_vbufs = mgr->nr_vertex_buffers;
   struct pipe_vertex_element *velems =
-         mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
+         mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};
 
@@ -762,7 +788,7 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
   for (i = 0; i < nr_velems; i++) {
     struct pipe_vertex_element *velem = &velems[i];
     unsigned index = velem->vertex_buffer_index;
-      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
+      struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
     unsigned instance_div, first, size;
 
     /* Skip the buffers generated by translate. */
@@ -774,7 +800,7 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
 
     assert(vb->buffer);
 
-      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
+      if (!vb->buffer->user_ptr) {
       continue;
     }
 
@@ -820,129 +846,35 @@ u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
      start = start_offset[i];
      assert(start < end);
 
-        real_vb = &mgr->b.real_vertex_buffer[i];
-        ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;
+        real_vb = &mgr->real_vertex_buffer[i];
+        ptr = mgr->vertex_buffer[i].buffer->user_ptr;
 
-        u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
+        u_upload_data(mgr->uploader, start, end - start, ptr + start,
                     &real_vb->buffer_offset, &real_vb->buffer);
 
      real_vb->buffer_offset -= start;
   }
 }
 
-unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf *mgrb)
-{
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
-   unsigned i, nr = mgr->ve->count;
-   struct pipe_vertex_element *velems =
-         mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
-   unsigned result = ~0;
-
-   for (i = 0; i < nr; i++) {
-      struct pipe_vertex_buffer *vb =
-            &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
-      unsigned size, max_count, value;
-
-      /* We're not interested in constant and per-instance attribs. */
-      if (!vb->buffer ||
-          !vb->stride ||
-          velems[i].instance_divisor) {
-         continue;
-      }
-
-      size = vb->buffer->width0;
-
-      /* Subtract buffer_offset. */
-      value = vb->buffer_offset;
-      if (value >= size) {
-         return 0;
-      }
-      size -= value;
-
-      /* Subtract src_offset. */
-      value = velems[i].src_offset;
-      if (value >= size) {
-         return 0;
-      }
-      size -= value;
-
-      /* Subtract format_size. */
-      value = mgr->ve->native_format_size[i];
-      if (value >= size) {
-         return 0;
-      }
-      size -= value;
-
-      /* Compute the max count. */
-      max_count = 1 + size / vb->stride;
-      result = MIN2(result, max_count);
-   }
-   return result;
-}
-
-static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
+static boolean u_vbuf_need_minmax_index(struct u_vbuf *mgr)
 {
-   unsigned i, nr = mgr->ve->count;
-
-   for (i = 0; i < nr; i++) {
-      struct pipe_vertex_buffer *vb;
-      unsigned index;
-
-      /* Per-instance attribs don't need min/max_index. */
-      if (mgr->ve->ve[i].instance_divisor) {
-         continue;
-      }
-
-      index = mgr->ve->ve[i].vertex_buffer_index;
-      vb = &mgr->b.vertex_buffer[index];
-
-      /* Constant attribs don't need min/max_index. */
-      if (!vb->stride) {
-         continue;
-      }
-
-      /* Per-vertex attribs need min/max_index. */
-      if (u_vbuf_resource(vb->buffer)->user_ptr ||
-          mgr->ve->incompatible_layout_elem[i] ||
-          mgr->incompatible_vb[index]) {
-         return TRUE;
-      }
-   }
-
-   return FALSE;
+   /* See if there are any per-vertex attribs which will be uploaded or
+    * translated. Use bitmasks to get the info instead of looping over vertex
+    * elements. */
+   return ((mgr->user_vb_mask | mgr->incompatible_vb_mask |
+            mgr->ve->incompatible_vb_mask_any) &
+           mgr->ve->noninstance_vb_mask_any & mgr->nonzero_stride_vb_mask) != 0;
 }
 
-static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf_priv *mgr)
+static boolean u_vbuf_mapping_vertex_buffer_blocks(struct u_vbuf *mgr)
 {
-   unsigned i, nr = mgr->ve->count;
-
-   for (i = 0; i < nr; i++) {
-      struct pipe_vertex_buffer *vb;
-      unsigned index;
-
-      /* Per-instance attribs are not per-vertex data. */
-      if (mgr->ve->ve[i].instance_divisor) {
-         continue;
-      }
-
-      index = mgr->ve->ve[i].vertex_buffer_index;
-      vb = &mgr->b.vertex_buffer[index];
-
-      /* Constant attribs are not per-vertex data. */
-      if (!vb->stride) {
-         continue;
-      }
-
-      /* Return true for the hw buffers which don't need to be translated. */
-      /* XXX we could use some kind of a is-busy query. */
-      if (!u_vbuf_resource(vb->buffer)->user_ptr &&
-          !mgr->ve->incompatible_layout_elem[i] &&
-          !mgr->incompatible_vb[index]) {
-         return TRUE;
-      }
-   }
-
-   return FALSE;
+   /* Return true if there are hw buffers which don't need to be translated.
+    *
+    * We could query whether each buffer is busy, but that would
+    * be way more costly than this. */
+   return (~mgr->user_vb_mask & ~mgr->incompatible_vb_mask &
+           mgr->ve->compatible_vb_mask_all & mgr->ve->noninstance_vb_mask_any &
+           mgr->nonzero_stride_vb_mask) != 0;
 }
 
 static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
@@ -956,8 +888,8 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
   unsigned i;
   unsigned restart_index = info->restart_index;
 
-   if (u_vbuf_resource(ib->buffer)->user_ptr) {
-      indices = u_vbuf_resource(ib->buffer)->user_ptr +
+   if (ib->buffer->user_ptr) {
+      indices = ib->buffer->user_ptr +
               ib->offset + info->start * ib->index_size;
   } else {
     indices = pipe_buffer_map_range(pipe, ib->buffer,
@@ -1044,38 +976,42 @@ static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
   }
 }
 
-enum u_vbuf_return_flags
-u_vbuf_draw_begin(struct u_vbuf *mgrb,
-                  struct pipe_draw_info *info)
+void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
 {
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
+   struct pipe_context *pipe = mgr->pipe;
   int start_vertex, min_index;
   unsigned num_vertices;
-   bool unroll_indices = false;
+   boolean unroll_indices = FALSE;
+   uint32_t user_vb_mask = mgr->user_vb_mask;
+
+   /* Normal draw. No fallback and no user buffers. */
+   if (!mgr->incompatible_vb_mask &&
+       !mgr->ve->incompatible_elem_mask &&
+       !user_vb_mask) {
+      /* Set vertex buffers if needed. */
+      if (mgr->vertex_buffers_dirty) {
+         pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
+                                  mgr->real_vertex_buffer);
+         mgr->vertex_buffers_dirty = FALSE;
+      }
 
-   if (!mgr->incompatible_vb_layout &&
-       !mgr->ve->incompatible_layout &&
-       !mgr->any_user_vbs) {
-      return 0;
+      pipe->draw_vbo(pipe, info);
+      return;
   }
 
   if (info->indexed) {
-      int max_index;
-      bool index_bounds_valid = false;
-
-      if (info->max_index != ~0) {
-         min_index = info->min_index;
-         max_index = info->max_index;
-         index_bounds_valid = true;
-      } else if (u_vbuf_need_minmax_index(mgr)) {
-         u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
-                                 &min_index, &max_index);
-         index_bounds_valid = true;
-      }
+      /* See if anything needs to be done for per-vertex attribs. */
+      if (u_vbuf_need_minmax_index(mgr)) {
+         int max_index;
+
+         if (info->max_index != ~0) {
+            min_index = info->min_index;
+            max_index = info->max_index;
+         } else {
+            u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer, info,
+                                    &min_index, &max_index);
+         }
 
-      /* If the index bounds are valid, it means some upload or translation
-       * of per-vertex attribs will be performed. */
-      if (index_bounds_valid) {
        assert(min_index <= max_index);
 
        start_vertex = min_index + info->index_bias;
@@ -1090,7 +1026,9 @@ u_vbuf_draw_begin(struct u_vbuf *mgrb,
           num_vertices-info->count > 32 &&
           !u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
         /*printf("num_vertices=%i count=%i\n", num_vertices, info->count);*/
-            unroll_indices = true;
+            unroll_indices = TRUE;
+            user_vb_mask &= ~(mgr->nonzero_stride_vb_mask &
+                              mgr->ve->noninstance_vb_mask_any);
       }
     } else {
        /* Nothing to do for per-vertex attribs. */
@@ -1106,17 +1044,20 @@
 
   /* Translate vertices with non-native layouts or formats. */
   if (unroll_indices ||
-       mgr->incompatible_vb_layout ||
-       mgr->ve->incompatible_layout) {
+       mgr->incompatible_vb_mask ||
+       mgr->ve->incompatible_elem_mask) {
     /* XXX check the return value */
     u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
                            info->start_instance, info->instance_count,
                            info->start, info->count, min_index,
                            unroll_indices);
+
+      user_vb_mask &= ~(mgr->incompatible_vb_mask |
+                        mgr->ve->incompatible_vb_mask_all);
   }
 
   /* Upload user buffers. */
-   if (mgr->any_user_vbs) {
+   if (user_vb_mask) {
     u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                           info->start_instance, info->instance_count);
   }
@@ -1130,34 +1071,75 @@
   }
 
   unsigned i;
-   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
+   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
     printf("input %i: ", i);
-      util_dump_vertex_buffer(stdout, mgr->b.vertex_buffer+i);
+      util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
     printf("\n");
   }
-   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
+   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
     printf("real %i: ", i);
-      util_dump_vertex_buffer(stdout, mgr->b.real_vertex_buffer+i);
+      util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
     printf("\n");
   }
   */
 
-   if (unroll_indices) {
-      info->indexed = FALSE;
-      info->index_bias = 0;
-      info->min_index = 0;
-      info->max_index = info->count - 1;
-      info->start = 0;
+   u_upload_unmap(mgr->uploader);
+   pipe->set_vertex_buffers(pipe, mgr->nr_real_vertex_buffers,
+                            mgr->real_vertex_buffer);
+
+   if (unlikely(unroll_indices)) {
+      struct pipe_draw_info new_info = *info;
+      new_info.indexed = FALSE;
+      new_info.index_bias = 0;
+      new_info.min_index = 0;
+      new_info.max_index = info->count - 1;
+      new_info.start = 0;
+
+      pipe->draw_vbo(pipe, &new_info);
+   } else {
+      pipe->draw_vbo(pipe, info);
+   }
+
+   if (mgr->using_translate) {
+      u_vbuf_translate_end(mgr);
   }
+   mgr->vertex_buffers_dirty = TRUE;
+}
 
-   return U_VBUF_BUFFERS_UPDATED;
+void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
+{
+   assert(!mgr->ve_saved);
+   mgr->ve_saved = mgr->ve;
 }
 
-void u_vbuf_draw_end(struct u_vbuf *mgrb)
+void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
 {
-   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
+   if (mgr->ve != mgr->ve_saved) {
+      struct pipe_context *pipe = mgr->pipe;
 
-   if (mgr->fallback_ve) {
-      u_vbuf_translate_end(mgr);
+      mgr->ve = mgr->ve_saved;
+      pipe->bind_vertex_elements_state(pipe,
+                                       mgr->ve ? mgr->ve->driver_cso : NULL);
   }
+   mgr->ve_saved = NULL;
+}
+
+void u_vbuf_save_vertex_buffers(struct u_vbuf *mgr)
+{
+   util_copy_vertex_buffers(mgr->vertex_buffer_saved,
+                            &mgr->nr_vertex_buffers_saved,
+                            mgr->vertex_buffer,
+                            mgr->nr_vertex_buffers);
+}
+
+void u_vbuf_restore_vertex_buffers(struct u_vbuf *mgr)
+{
+   unsigned i;
+
+   u_vbuf_set_vertex_buffers(mgr, mgr->nr_vertex_buffers_saved,
+                             mgr->vertex_buffer_saved);
+   for (i = 0; i < mgr->nr_vertex_buffers_saved; i++) {
+      pipe_resource_reference(&mgr->vertex_buffer_saved[i].buffer, NULL);
+   }
+   mgr->nr_vertex_buffers_saved = 0;
+}
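A note on the u_vbuf_translate_find_free_vb_slots() hunk: the commit replaces a linear scan over a used_vb[] array with a single ffs() on a free-slot bitmask. Below is a minimal standalone sketch of that technique; the mask values are hypothetical and not taken from the commit, and the real code derives unused_vb_mask from incompatible_vb_mask_all, incompatible_vb_mask and nr_vertex_buffers.

#include <stdint.h>
#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
   /* Hypothetical state: 3 buffers bound; buffers 0 and 2 are used and
    * compatible, so they must not be reused as fallback slots. */
   unsigned nr_vertex_buffers = 3;
   uint32_t used_compatible_mask = (1 << 0) | (1 << 2);

   /* A bit is set for every slot that is free for translated output:
    * either not used/compatible, or beyond the bound-buffer range. */
   uint32_t unused_vb_mask =
      ~used_compatible_mask | ~((1u << nr_vertex_buffers) - 1);

   if (unused_vb_mask) {
      unsigned index = ffs(unused_vb_mask) - 1; /* lowest free slot */
      unused_vb_mask &= ~(1u << index);         /* claim it, as the commit
                                                   does once per VB_* type */
      printf("fallback slot = %u\n", index);    /* prints 1 */
   }
   return 0;
}

On common CPUs ffs() compiles down to a single find-first-set instruction, which is why this replaces the per-slot loop so cheaply.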
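The u_vbuf_create_vertex_elements() hunk derives the "all" masks from the "any" masks with two complements restricted to the referenced buffers. A worked example of that mask algebra, with hypothetical masks standing in for the real per-element state:

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* Hypothetical layout: buffers 0, 1 and 2 are referenced. Buffer 1 has
    * one compatible and one incompatible element; buffer 2 is referenced
    * only by incompatible elements. */
   uint32_t used_buffers             = (1 << 0) | (1 << 1) | (1 << 2);
   uint32_t incompatible_vb_mask_any = (1 << 1) | (1 << 2);
   uint32_t compatible_vb_mask_any   = (1 << 0) | (1 << 1);

   /* "All references compatible" is the same as "no incompatible
    * reference at all", restricted to buffers that are referenced. */
   uint32_t compatible_vb_mask_all   = ~incompatible_vb_mask_any & used_buffers;
   uint32_t incompatible_vb_mask_all = ~compatible_vb_mask_any & used_buffers;

   assert(compatible_vb_mask_all == (1 << 0));   /* only buffer 0 */
   assert(incompatible_vb_mask_all == (1 << 2)); /* only buffer 2 */
   return 0;
}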
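Finally, the rewritten u_vbuf_need_minmax_index() collapses its old per-element loop into three mask intersections: index bounds are needed only if some buffer that will be uploaded or translated is referenced by a per-vertex (non-instanced) element and has a non-zero stride. A sketch mirroring that return expression, with hypothetical mask values:

#include <stdint.h>
#include <stdio.h>

/* All masks below are hypothetical stand-ins for the u_vbuf fields. */
static int need_minmax_index(uint32_t user_vb_mask,
                             uint32_t incompatible_vb_mask,
                             uint32_t ve_incompatible_vb_mask_any,
                             uint32_t ve_noninstance_vb_mask_any,
                             uint32_t nonzero_stride_vb_mask)
{
   /* Uploaded-or-translated AND per-vertex AND non-zero stride. */
   return ((user_vb_mask | incompatible_vb_mask |
            ve_incompatible_vb_mask_any) &
           ve_noninstance_vb_mask_any &
           nonzero_stride_vb_mask) != 0;
}

int main(void)
{
   /* Buffer 0 is a user buffer, per-vertex, stride != 0: bounds needed. */
   printf("%d\n", need_minmax_index(0x1, 0, 0, 0x1, 0x1)); /* 1 */
   /* Same buffer referenced only by instanced elements: not needed. */
   printf("%d\n", need_minmax_index(0x1, 0, 0, 0x0, 0x1)); /* 0 */
   return 0;
}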