Diffstat (limited to 'libxcb/src')
-rw-r--r--  libxcb/src/xcb_in.c   | 146
-rw-r--r--  libxcb/src/xcb_out.c  | 734
-rw-r--r--  libxcb/src/xcbint.h   |   2
3 files changed, 430 insertions, 452 deletions
diff --git a/libxcb/src/xcb_in.c b/libxcb/src/xcb_in.c
index b4c48fb67..c9cb5f445 100644
--- a/libxcb/src/xcb_in.c
+++ b/libxcb/src/xcb_in.c
@@ -75,7 +75,7 @@ typedef struct pending_reply {
} pending_reply;
typedef struct reader_list {
- unsigned int request;
+ uint64_t request;
pthread_cond_t *data;
struct reader_list *next;
} reader_list;
@@ -208,10 +208,10 @@ static int read_packet(xcb_connection_t *c)
c->in.current_reply_tail = &cur->next;
for(reader = c->in.readers;
reader &&
- XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
+ XCB_SEQUENCE_COMPARE(reader->request, <=, c->in.request_read);
reader = reader->next)
{
- if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
+ if(reader->request == c->in.request_read)
{
pthread_cond_signal(reader->data);
break;
@@ -301,7 +301,7 @@ static int read_block(const int fd, void *buf, const ssize_t len)
return len;
}
-static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
+static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
struct reply_list *head;
@@ -310,7 +310,7 @@ static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **repl
head = 0;
/* We've read requests past the one we want, so if it has replies we have
* them all and they're in the replies map. */
- else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
+ else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
{
head = _xcb_map_remove(c->in.replies, request);
if(head && head->next)
@@ -318,7 +318,7 @@ static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **repl
}
/* We're currently processing the responses to the request we want, and we
* have a reply ready to return. So just return it without blocking. */
- else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
+ else if(request == c->in.request_read && c->in.current_reply)
{
head = c->in.current_reply;
c->in.current_reply = head->next;
@@ -327,7 +327,7 @@ static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **repl
}
/* We know this request can't have any more replies, and we've already
* established it doesn't have a reply now. Don't bother blocking. */
- else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
+ else if(request == c->in.request_completed)
head = 0;
/* We may have more replies on the way for this request: block until we're
* sure. */
@@ -356,25 +356,12 @@ static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **repl
return 1;
}
-/* Public interface */
-
-void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
+static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
- uint64_t widened_request;
void *ret = 0;
- if(e)
- *e = 0;
- if(c->has_error)
- return 0;
-
- pthread_mutex_lock(&c->iolock);
-
- widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
- if(widened_request > c->out.request)
- widened_request -= UINT64_C(1) << 32;
/* If this request has not been written yet, write it. */
- if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
+ if(c->out.return_socket || _xcb_out_flush_to(c, request))
{
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
reader_list reader;
@@ -382,7 +369,7 @@ void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_
for(prev_reader = &c->in.readers;
*prev_reader &&
- XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
+ XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request);
prev_reader = &(*prev_reader)->next)
{
/* empty */;
@@ -398,7 +385,7 @@ void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_
for(prev_reader = &c->in.readers;
*prev_reader &&
- XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
+ XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request);
prev_reader = &(*prev_reader)->next)
{
if(*prev_reader == &reader)
@@ -411,6 +398,29 @@ void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_
}
_xcb_in_wake_up_next_reader(c);
+ return ret;
+}
+
+static uint64_t widen(xcb_connection_t *c, unsigned int request)
+{
+ uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
+ if(widened_request > c->out.request)
+ widened_request -= UINT64_C(1) << 32;
+ return widened_request;
+}
+
+/* Public interface */
+
+void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
+{
+ void *ret;
+ if(e)
+ *e = 0;
+ if(c->has_error)
+ return 0;
+
+ pthread_mutex_lock(&c->iolock);
+ ret = wait_for_reply(c, widen(c, request), e);
pthread_mutex_unlock(&c->iolock);
return ret;
}
@@ -436,66 +446,27 @@ static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_nex
c->in.pending_replies_tail = &pend->next;
}
-static void discard_reply(xcb_connection_t *c, unsigned int request)
+static void discard_reply(xcb_connection_t *c, uint64_t request)
{
- pending_reply *pend = 0;
+ void *reply;
pending_reply **prev_pend;
- uint64_t widened_request;
- /* We've read requests past the one we want, so if it has replies we have
- * them all and they're in the replies map. */
- if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
- {
- struct reply_list *head;
- head = _xcb_map_remove(c->in.replies, request);
- while (head)
- {
- struct reply_list *next = head->next;
- free(head->reply);
- free(head);
- head = next;
- }
- return;
- }
-
- /* We're currently processing the responses to the request we want, and we
- * have a reply ready to return. Free it, and mark the pend to free any further
- * replies. */
- if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
- {
- struct reply_list *head;
- head = c->in.current_reply;
- c->in.current_reply = NULL;
- c->in.current_reply_tail = &c->in.current_reply;
- while (head)
- {
- struct reply_list *next = head->next;
- free(head->reply);
- free(head);
- head = next;
- }
-
- pend = c->in.pending_replies;
- if(pend &&
- !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
- (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
- XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
- pend = 0;
- if(pend)
- pend->flags |= XCB_REQUEST_DISCARD_REPLY;
- else
- insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);
+ /* Free any replies or errors that we've already read. Stop if
+ * xcb_wait_for_reply would block or we've run out of replies. */
+ while(poll_for_reply(c, request, &reply, 0) && reply)
+ free(reply);
+ /* If we've proven there are no more responses coming, we're done. */
+ if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
return;
- }
/* Walk the list of pending requests. Mark the first match for deletion. */
for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
{
- if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
+ if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
break;
- if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
+ if((*prev_pend)->first_request == request)
{
/* Pending reply found. Mark for discard: */
(*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
@@ -504,11 +475,7 @@ static void discard_reply(xcb_connection_t *c, unsigned int request)
}
/* Pending reply not found (likely due to _unchecked request). Create one: */
- widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
- if(widened_request > c->out.request)
- widened_request -= UINT64_C(1) << 32;
-
- insert_pending_discard(c, prev_pend, widened_request);
+ insert_pending_discard(c, prev_pend, request);
}
void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
@@ -521,7 +488,7 @@ void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
return;
pthread_mutex_lock(&c->iolock);
- discard_reply(c, sequence);
+ discard_reply(c, widen(c, sequence));
pthread_mutex_unlock(&c->iolock);
}
@@ -537,7 +504,7 @@ int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply,
}
assert(reply != 0);
pthread_mutex_lock(&c->iolock);
- ret = poll_for_reply(c, request, reply, error);
+ ret = poll_for_reply(c, widen(c, request), reply, error);
pthread_mutex_unlock(&c->iolock);
return ret;
}
@@ -575,21 +542,22 @@ xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
- /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
- * that would require factoring the locking out of xcb_get_input_focus,
- * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
- xcb_generic_error_t *ret;
+ uint64_t request;
+ xcb_generic_error_t *ret = 0;
void *reply;
if(c->has_error)
return 0;
- if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>=,c->in.request_expected)
- && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
+ pthread_mutex_lock(&c->iolock);
+ request = widen(c, cookie.sequence);
+ if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
+ && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
{
- free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
- assert(!ret);
+ _xcb_out_send_sync(c);
+ _xcb_out_flush_to(c, c->out.request);
}
- reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
+ reply = wait_for_reply(c, request, &ret);
assert(!reply);
+ pthread_mutex_unlock(&c->iolock);
return ret;
}
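For reference, a standalone sketch of the 32-to-64-bit sequence widening that the new widen() helper above performs (illustrative only; widen_seq and the sample values are not part of libxcb): the caller's 32-bit sequence is combined with the high bits of the newest 64-bit sequence issued, stepping back one 32-bit epoch if the result would name a request that has not been sent yet.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t widen_seq(uint64_t newest_full, uint32_t request)
{
    uint64_t widened = (newest_full & UINT64_C(0xffffffff00000000)) | request;
    if(widened > newest_full)          /* cannot refer to an unsent request, */
        widened -= UINT64_C(1) << 32;  /* so it must belong to the previous epoch */
    return widened;
}

int main(void)
{
    uint64_t newest = (UINT64_C(1) << 32) + 5;              /* sample value: 4294967301 */
    printf("%" PRIu64 "\n", widen_seq(newest, 3));          /* 4294967299, current epoch */
    printf("%" PRIu64 "\n", widen_seq(newest, 0xfffffff0)); /* 4294967280, previous epoch */
    return 0;
}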
diff --git a/libxcb/src/xcb_out.c b/libxcb/src/xcb_out.c
index fbce7a0ea..4f27de116 100644
--- a/libxcb/src/xcb_out.c
+++ b/libxcb/src/xcb_out.c
@@ -1,362 +1,372 @@
-/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Except as contained in this notice, the names of the authors or their
- * institutions shall not be used in advertising or otherwise to promote the
- * sale, use or other dealings in this Software without prior written
- * authorization from the authors.
- */
-
-/* Stuff that sends stuff to the server. */
-
-#include <assert.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-
-#include "xcb.h"
-#include "xcbext.h"
-#include "xcbint.h"
-#include "bigreq.h"
-
-static int write_block(xcb_connection_t *c, struct iovec *vector, int count)
-{
- while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
- {
- memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
- c->out.queue_len += vector[0].iov_len;
- vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
- vector[0].iov_len = 0;
- ++vector, --count;
- }
- if(!count)
- return 1;
-
- --vector, ++count;
- vector[0].iov_base = c->out.queue;
- vector[0].iov_len = c->out.queue_len;
- c->out.queue_len = 0;
- return _xcb_out_send(c, vector, count);
-}
-
-static void get_socket_back(xcb_connection_t *c)
-{
- while(c->out.return_socket && c->out.socket_moving)
- pthread_cond_wait(&c->out.socket_cond, &c->iolock);
- if(!c->out.return_socket)
- return;
-
- c->out.socket_moving = 1;
- pthread_mutex_unlock(&c->iolock);
- c->out.return_socket(c->out.socket_closure);
- pthread_mutex_lock(&c->iolock);
- c->out.socket_moving = 0;
-
- pthread_cond_broadcast(&c->out.socket_cond);
- c->out.return_socket = 0;
- c->out.socket_closure = 0;
- _xcb_in_replies_done(c);
-}
-
-/* Public interface */
-
-void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
-{
- if(c->has_error)
- return;
- pthread_mutex_lock(&c->out.reqlenlock);
- if(c->out.maximum_request_length_tag == LAZY_NONE)
- {
- const xcb_query_extension_reply_t *ext;
- ext = xcb_get_extension_data(c, &xcb_big_requests_id);
- if(ext && ext->present)
- {
- c->out.maximum_request_length_tag = LAZY_COOKIE;
- c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
- }
- else
- {
- c->out.maximum_request_length_tag = LAZY_FORCED;
- c->out.maximum_request_length.value = c->setup->maximum_request_length;
- }
- }
- pthread_mutex_unlock(&c->out.reqlenlock);
-}
-
-uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
-{
- if(c->has_error)
- return 0;
- xcb_prefetch_maximum_request_length(c);
- pthread_mutex_lock(&c->out.reqlenlock);
- if(c->out.maximum_request_length_tag == LAZY_COOKIE)
- {
- xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
- c->out.maximum_request_length_tag = LAZY_FORCED;
- if(r)
- {
- c->out.maximum_request_length.value = r->maximum_request_length;
- free(r);
- }
- else
- c->out.maximum_request_length.value = c->setup->maximum_request_length;
- }
- pthread_mutex_unlock(&c->out.reqlenlock);
- return c->out.maximum_request_length.value;
-}
-
-unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
-{
- static const union {
- struct {
- uint8_t major;
- uint8_t pad;
- uint16_t len;
- } fields;
- uint32_t packet;
- } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
- uint64_t request;
- uint32_t prefix[3] = { 0 };
- int veclen = req->count;
- enum workarounds workaround = WORKAROUND_NONE;
-
- if(c->has_error)
- return 0;
-
- assert(c != 0);
- assert(vector != 0);
- assert(req->count > 0);
-
- if(!(flags & XCB_REQUEST_RAW))
- {
- static const char pad[3];
- unsigned int i;
- uint16_t shortlen = 0;
- size_t longlen = 0;
- assert(vector[0].iov_len >= 4);
- /* set the major opcode, and the minor opcode for extensions */
- if(req->ext)
- {
- const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
- if(!(extension && extension->present))
- {
- _xcb_conn_shutdown(c);
- return 0;
- }
- ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
- ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
- }
- else
- ((uint8_t *) vector[0].iov_base)[0] = req->opcode;
-
- /* put together the length field, possibly using BIGREQUESTS */
- for(i = 0; i < req->count; ++i)
- {
- longlen += vector[i].iov_len;
- if(!vector[i].iov_base)
- {
- vector[i].iov_base = (char *) pad;
- assert(vector[i].iov_len <= sizeof(pad));
- }
- }
- assert((longlen & 3) == 0);
- longlen >>= 2;
-
- if(longlen <= c->setup->maximum_request_length)
- {
- /* we don't need BIGREQUESTS. */
- shortlen = longlen;
- longlen = 0;
- }
- else if(longlen > xcb_get_maximum_request_length(c))
- {
- _xcb_conn_shutdown(c);
- return 0; /* server can't take this; maybe need BIGREQUESTS? */
- }
-
- /* set the length field. */
- ((uint16_t *) vector[0].iov_base)[1] = shortlen;
- if(!shortlen)
- prefix[2] = ++longlen;
- }
- flags &= ~XCB_REQUEST_RAW;
-
- /* do we need to work around the X server bug described in glx.xml? */
- /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
- * configuration, but that should be handled here anyway. */
- if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
- ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
- req->opcode == 21))
- workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
-
- /* get a sequence number and arrange for delivery. */
- pthread_mutex_lock(&c->iolock);
- /* wait for other writing threads to get out of my way. */
- while(c->out.writing)
- pthread_cond_wait(&c->out.cond, &c->iolock);
- get_socket_back(c);
-
- request = ++c->out.request;
- /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
- * a reply.
- * Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
- * applications see sequence 0 as that is used to indicate
- * an error in sending the request */
- while((req->isvoid &&
- c->out.request == c->in.request_expected + (1 << 16) - 1) ||
- request == 0)
- {
- prefix[0] = sync_req.packet;
- _xcb_in_expect_reply(c, request, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY);
- c->in.request_expected = c->out.request;
- request = ++c->out.request;
- }
-
- if(workaround != WORKAROUND_NONE || flags != 0)
- _xcb_in_expect_reply(c, request, workaround, flags);
- if(!req->isvoid)
- c->in.request_expected = c->out.request;
-
- if(prefix[0] || prefix[2])
- {
- --vector, ++veclen;
- if(prefix[2])
- {
- prefix[1] = ((uint32_t *) vector[1].iov_base)[0];
- vector[1].iov_base = (uint32_t *) vector[1].iov_base + 1;
- vector[1].iov_len -= sizeof(uint32_t);
- }
- vector[0].iov_len = sizeof(uint32_t) * ((prefix[0] ? 1 : 0) + (prefix[2] ? 2 : 0));
- vector[0].iov_base = prefix + !prefix[0];
- }
-
- if(!write_block(c, vector, veclen))
- {
- _xcb_conn_shutdown(c);
- request = 0;
- }
- pthread_mutex_unlock(&c->iolock);
- return request;
-}
-
-int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
-{
- int ret;
- if(c->has_error)
- return 0;
- pthread_mutex_lock(&c->iolock);
- get_socket_back(c);
- ret = _xcb_out_flush_to(c, c->out.request);
- if(ret)
- {
- c->out.return_socket = return_socket;
- c->out.socket_closure = closure;
- if(flags)
- _xcb_in_expect_reply(c, c->out.request, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
- assert(c->out.request == c->out.request_written);
- *sent = c->out.request;
- }
- pthread_mutex_unlock(&c->iolock);
- return ret;
-}
-
-int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
-{
- int ret;
- if(c->has_error)
- return 0;
- pthread_mutex_lock(&c->iolock);
- c->out.request += requests;
- ret = _xcb_out_send(c, vector, count);
- pthread_mutex_unlock(&c->iolock);
- return ret;
-}
-
-int xcb_flush(xcb_connection_t *c)
-{
- int ret;
- if(c->has_error)
- return 0;
- pthread_mutex_lock(&c->iolock);
- ret = _xcb_out_flush_to(c, c->out.request);
- pthread_mutex_unlock(&c->iolock);
- return ret;
-}
-
-/* Private interface */
-
-int _xcb_out_init(_xcb_out *out)
-{
- if(pthread_cond_init(&out->socket_cond, 0))
- return 0;
- out->return_socket = 0;
- out->socket_closure = 0;
- out->socket_moving = 0;
-
- if(pthread_cond_init(&out->cond, 0))
- return 0;
- out->writing = 0;
-
- out->queue_len = 0;
-
- out->request = 0;
- out->request_written = 0;
-
- if(pthread_mutex_init(&out->reqlenlock, 0))
- return 0;
- out->maximum_request_length_tag = LAZY_NONE;
-
- return 1;
-}
-
-void _xcb_out_destroy(_xcb_out *out)
-{
- pthread_cond_destroy(&out->cond);
- pthread_mutex_destroy(&out->reqlenlock);
-}
-
-int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
-{
- int ret = 1;
- while(ret && count)
- ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
- c->out.request_written = c->out.request;
- pthread_cond_broadcast(&c->out.cond);
- _xcb_in_wake_up_next_reader(c);
- return ret;
-}
-
-int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
-{
- assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
- if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
- return 1;
- if(c->out.queue_len)
- {
- struct iovec vec;
- vec.iov_base = c->out.queue;
- vec.iov_len = c->out.queue_len;
- c->out.queue_len = 0;
- return _xcb_out_send(c, &vec, 1);
- }
- while(c->out.writing)
- pthread_cond_wait(&c->out.cond, &c->iolock);
- assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
- return 1;
-}
+/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the names of the authors or their
+ * institutions shall not be used in advertising or otherwise to promote the
+ * sale, use or other dealings in this Software without prior written
+ * authorization from the authors.
+ */
+
+/* Stuff that sends stuff to the server. */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "xcb.h"
+#include "xcbext.h"
+#include "xcbint.h"
+#include "bigreq.h"
+
+static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
+{
+ if(c->has_error)
+ return;
+
+ ++c->out.request;
+ if(!isvoid)
+ c->in.request_expected = c->out.request;
+ if(workaround != WORKAROUND_NONE || flags != 0)
+ _xcb_in_expect_reply(c, c->out.request, workaround, flags);
+
+ while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
+ {
+ memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
+ c->out.queue_len += vector[0].iov_len;
+ vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
+ vector[0].iov_len = 0;
+ ++vector, --count;
+ }
+ if(!count)
+ return;
+
+ --vector, ++count;
+ vector[0].iov_base = c->out.queue;
+ vector[0].iov_len = c->out.queue_len;
+ c->out.queue_len = 0;
+ _xcb_out_send(c, vector, count);
+}
+
+static void send_sync(xcb_connection_t *c)
+{
+ static const union {
+ struct {
+ uint8_t major;
+ uint8_t pad;
+ uint16_t len;
+ } fields;
+ uint32_t packet;
+ } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
+ struct iovec vector[2];
+ vector[1].iov_base = (char *) &sync_req;
+ vector[1].iov_len = sizeof(sync_req);
+ send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
+}
+
+static void get_socket_back(xcb_connection_t *c)
+{
+ while(c->out.return_socket && c->out.socket_moving)
+ pthread_cond_wait(&c->out.socket_cond, &c->iolock);
+ if(!c->out.return_socket)
+ return;
+
+ c->out.socket_moving = 1;
+ pthread_mutex_unlock(&c->iolock);
+ c->out.return_socket(c->out.socket_closure);
+ pthread_mutex_lock(&c->iolock);
+ c->out.socket_moving = 0;
+
+ pthread_cond_broadcast(&c->out.socket_cond);
+ c->out.return_socket = 0;
+ c->out.socket_closure = 0;
+ _xcb_in_replies_done(c);
+}
+
+/* Public interface */
+
+void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
+{
+ if(c->has_error)
+ return;
+ pthread_mutex_lock(&c->out.reqlenlock);
+ if(c->out.maximum_request_length_tag == LAZY_NONE)
+ {
+ const xcb_query_extension_reply_t *ext;
+ ext = xcb_get_extension_data(c, &xcb_big_requests_id);
+ if(ext && ext->present)
+ {
+ c->out.maximum_request_length_tag = LAZY_COOKIE;
+ c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
+ }
+ else
+ {
+ c->out.maximum_request_length_tag = LAZY_FORCED;
+ c->out.maximum_request_length.value = c->setup->maximum_request_length;
+ }
+ }
+ pthread_mutex_unlock(&c->out.reqlenlock);
+}
+
+uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
+{
+ if(c->has_error)
+ return 0;
+ xcb_prefetch_maximum_request_length(c);
+ pthread_mutex_lock(&c->out.reqlenlock);
+ if(c->out.maximum_request_length_tag == LAZY_COOKIE)
+ {
+ xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
+ c->out.maximum_request_length_tag = LAZY_FORCED;
+ if(r)
+ {
+ c->out.maximum_request_length.value = r->maximum_request_length;
+ free(r);
+ }
+ else
+ c->out.maximum_request_length.value = c->setup->maximum_request_length;
+ }
+ pthread_mutex_unlock(&c->out.reqlenlock);
+ return c->out.maximum_request_length.value;
+}
+
+unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
+{
+ uint64_t request;
+ uint32_t prefix[2];
+ int veclen = req->count;
+ enum workarounds workaround = WORKAROUND_NONE;
+
+ if(c->has_error)
+ return 0;
+
+ assert(c != 0);
+ assert(vector != 0);
+ assert(req->count > 0);
+
+ if(!(flags & XCB_REQUEST_RAW))
+ {
+ static const char pad[3];
+ unsigned int i;
+ uint16_t shortlen = 0;
+ size_t longlen = 0;
+ assert(vector[0].iov_len >= 4);
+ /* set the major opcode, and the minor opcode for extensions */
+ if(req->ext)
+ {
+ const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
+ if(!(extension && extension->present))
+ {
+ _xcb_conn_shutdown(c);
+ return 0;
+ }
+ ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
+ ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
+ }
+ else
+ ((uint8_t *) vector[0].iov_base)[0] = req->opcode;
+
+ /* put together the length field, possibly using BIGREQUESTS */
+ for(i = 0; i < req->count; ++i)
+ {
+ longlen += vector[i].iov_len;
+ if(!vector[i].iov_base)
+ {
+ vector[i].iov_base = (char *) pad;
+ assert(vector[i].iov_len <= sizeof(pad));
+ }
+ }
+ assert((longlen & 3) == 0);
+ longlen >>= 2;
+
+ if(longlen <= c->setup->maximum_request_length)
+ {
+ /* we don't need BIGREQUESTS. */
+ shortlen = longlen;
+ longlen = 0;
+ }
+ else if(longlen > xcb_get_maximum_request_length(c))
+ {
+ _xcb_conn_shutdown(c);
+ return 0; /* server can't take this; maybe need BIGREQUESTS? */
+ }
+
+ /* set the length field. */
+ ((uint16_t *) vector[0].iov_base)[1] = shortlen;
+ if(!shortlen)
+ {
+ prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
+ prefix[1] = ++longlen;
+ vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
+ vector[0].iov_len -= sizeof(uint32_t);
+ --vector, ++veclen;
+ vector[0].iov_base = prefix;
+ vector[0].iov_len = sizeof(prefix);
+ }
+ }
+ flags &= ~XCB_REQUEST_RAW;
+
+ /* do we need to work around the X server bug described in glx.xml? */
+ /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
+ * configuration, but that should be handled here anyway. */
+ if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
+ ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
+ req->opcode == 21))
+ workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;
+
+ /* get a sequence number and arrange for delivery. */
+ pthread_mutex_lock(&c->iolock);
+ /* wait for other writing threads to get out of my way. */
+ while(c->out.writing)
+ pthread_cond_wait(&c->out.cond, &c->iolock);
+ get_socket_back(c);
+
+ /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
+ * a reply. */
+ if(req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2)
+ send_sync(c);
+ /* Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
+ * applications see sequence 0 as that is used to indicate
+ * an error in sending the request */
+ if((unsigned int) (c->out.request + 1) == 0)
+ send_sync(c);
+
+ /* The above send_sync calls could drop the I/O lock, but this
+ * thread will still exclude any other thread that tries to write,
+ * so the sequence number postconditions still hold. */
+ send_request(c, req->isvoid, workaround, flags, vector, veclen);
+ request = c->has_error ? 0 : c->out.request;
+ pthread_mutex_unlock(&c->iolock);
+ return request;
+}
+
+int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
+{
+ int ret;
+ if(c->has_error)
+ return 0;
+ pthread_mutex_lock(&c->iolock);
+ get_socket_back(c);
+ ret = _xcb_out_flush_to(c, c->out.request);
+ if(ret)
+ {
+ c->out.return_socket = return_socket;
+ c->out.socket_closure = closure;
+ if(flags)
+ _xcb_in_expect_reply(c, c->out.request, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
+ assert(c->out.request == c->out.request_written);
+ *sent = c->out.request;
+ }
+ pthread_mutex_unlock(&c->iolock);
+ return ret;
+}
+
+int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
+{
+ int ret;
+ if(c->has_error)
+ return 0;
+ pthread_mutex_lock(&c->iolock);
+ c->out.request += requests;
+ ret = _xcb_out_send(c, vector, count);
+ pthread_mutex_unlock(&c->iolock);
+ return ret;
+}
+
+int xcb_flush(xcb_connection_t *c)
+{
+ int ret;
+ if(c->has_error)
+ return 0;
+ pthread_mutex_lock(&c->iolock);
+ ret = _xcb_out_flush_to(c, c->out.request);
+ pthread_mutex_unlock(&c->iolock);
+ return ret;
+}
+
+/* Private interface */
+
+int _xcb_out_init(_xcb_out *out)
+{
+ if(pthread_cond_init(&out->socket_cond, 0))
+ return 0;
+ out->return_socket = 0;
+ out->socket_closure = 0;
+ out->socket_moving = 0;
+
+ if(pthread_cond_init(&out->cond, 0))
+ return 0;
+ out->writing = 0;
+
+ out->queue_len = 0;
+
+ out->request = 0;
+ out->request_written = 0;
+
+ if(pthread_mutex_init(&out->reqlenlock, 0))
+ return 0;
+ out->maximum_request_length_tag = LAZY_NONE;
+
+ return 1;
+}
+
+void _xcb_out_destroy(_xcb_out *out)
+{
+ pthread_cond_destroy(&out->cond);
+ pthread_mutex_destroy(&out->reqlenlock);
+}
+
+int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
+{
+ int ret = 1;
+ while(ret && count)
+ ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
+ c->out.request_written = c->out.request;
+ pthread_cond_broadcast(&c->out.cond);
+ _xcb_in_wake_up_next_reader(c);
+ return ret;
+}
+
+void _xcb_out_send_sync(xcb_connection_t *c)
+{
+ /* wait for other writing threads to get out of my way. */
+ while(c->out.writing)
+ pthread_cond_wait(&c->out.cond, &c->iolock);
+ get_socket_back(c);
+ send_sync(c);
+}
+
+int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
+{
+ assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
+ if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
+ return 1;
+ if(c->out.queue_len)
+ {
+ struct iovec vec;
+ vec.iov_base = c->out.queue;
+ vec.iov_len = c->out.queue_len;
+ c->out.queue_len = 0;
+ return _xcb_out_send(c, &vec, 1);
+ }
+ while(c->out.writing)
+ pthread_cond_wait(&c->out.cond, &c->iolock);
+ assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
+ return 1;
+}
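As an aside, a small sketch of the BIG-REQUESTS extended-length layout that the new !shortlen branch of xcb_send_request builds with prefix[]: the core 16-bit length field is zeroed and a 32-bit length in 4-byte units (counting the inserted word itself) follows the first 4 bytes of the request. The helper name make_bigreq_header and the PutImage example are illustrative, not libxcb API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t make_bigreq_header(uint8_t out[8], const uint8_t core4[4], uint32_t len_in_words)
{
    memcpy(out, core4, 4);
    out[2] = out[3] = 0;                  /* core length field := 0 signals extended length */
    uint32_t extended = len_in_words + 1; /* +1 for the inserted length word */
    memcpy(out + 4, &extended, 4);        /* client byte order, as on the X wire */
    return 8;
}

int main(void)
{
    const uint8_t core[4] = { 72 /* PutImage (illustrative) */, 2, 0, 0 };
    uint8_t hdr[8];
    uint32_t ext;
    size_t n = make_bigreq_header(hdr, core, 70000);
    memcpy(&ext, hdr + 4, 4);
    printf("%zu-byte header, extended length %u words\n", n, ext);
    return 0;
}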
diff --git a/libxcb/src/xcbint.h b/libxcb/src/xcbint.h
index 6613433ce..5950823f0 100644
--- a/libxcb/src/xcbint.h
+++ b/libxcb/src/xcbint.h
@@ -54,7 +54,6 @@ enum lazy_reply_tag
#define XCB_PAD(i) (-(i) & 3)
#define XCB_SEQUENCE_COMPARE(a,op,b) ((int64_t) ((a) - (b)) op 0)
-#define XCB_SEQUENCE_COMPARE_32(a,op,b) (((int) (a) - (int) (b)) op 0)
#ifndef offsetof
#define offsetof(type,member) ((size_t) &((type *)0)->member)
@@ -107,6 +106,7 @@ int _xcb_out_init(_xcb_out *out);
void _xcb_out_destroy(_xcb_out *out);
int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count);
+void _xcb_out_send_sync(xcb_connection_t *c);
int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request);
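The removal of XCB_SEQUENCE_COMPARE_32 leaves XCB_SEQUENCE_COMPARE as the only ordering primitive. A minimal sketch of why its signed-difference comparison stays correct even across a (now purely theoretical) 64-bit wrap, assuming two's-complement conversion as libxcb does; SEQ_CMP below is a local stand-in for the macro, not libxcb code.

#include <stdint.h>
#include <assert.h>

#define SEQ_CMP(a, op, b) ((int64_t) ((a) - (b)) op 0)

int main(void)
{
    uint64_t older = UINT64_MAX - 1;   /* just before a wrap */
    uint64_t newer = older + 3;        /* wraps around to 1 */
    assert(SEQ_CMP(older, <, newer));  /* still ordered correctly */
    assert(!SEQ_CMP(newer, <=, older));
    return 0;
}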