/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
* This file is licensed under the MIT license. See the file COPYING. */
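
/* This file glues Xlib's Display locking and output buffering to the
 * shared XCB connection underneath: it hooks Xlib's lock/unlock paths
 * so that Xlib's output buffer and sequence-number bookkeeping stay
 * consistent with what XCB has actually sent. */
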
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "Xlibint.h"
#include "locking.h"
#include "Xxcbint.h"
#include <xcb/xcbext.h>
#include <xcb/xcbxlib.h>

/* standard headers for assert(), the fixed-width types, malloc()/free(),
 * memcpy(), and struct iovec used below. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <pthread.h>
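
/* Called when Xlib takes the display lock: run any previously installed
 * lock hook first, take XCB's Xlib-compatibility lock at the outermost
 * locking level, then pull XCB's sequence state into the Display (see
 * _XGetXCBBuffer below). */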
static void _XCBLockDisplay(Display *dpy)
{
    if(dpy->xcb->lock_fns.lock_display)
        dpy->xcb->lock_fns.lock_display(dpy);
    if(!dpy->lock || dpy->lock->locking_level == 0)
        xcb_xlib_lock(dpy->xcb->connection);
    if(!(dpy->flags & XlibDisplayIOError))
        _XGetXCBBuffer(dpy);
}

/* XXX: If you change this function, update _XReply's copy of its guts! */
static void _XCBUnlockDisplay(Display *dpy)
{
    if(!(dpy->flags & XlibDisplayIOError))
    {
        _XPutXCBBuffer(dpy);
        assert(dpy->xcb->partial_request == 0);
        assert(xcb_get_request_sent(dpy->xcb->connection) == dpy->request);

        /* Traditional Xlib does this in _XSend; see the Xlib/XCB version
         * of that function for why we do it here instead. */
        _XSetSeqSyncFunction(dpy);
    }
    if(!dpy->lock || dpy->lock->locking_level == 0)
        xcb_xlib_unlock(dpy->xcb->connection);
    if(dpy->xcb->lock_fns.unlock_display)
        dpy->xcb->lock_fns.unlock_display(dpy);
}
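
/* Install the lock/unlock hooks above, saving any hooks already
 * registered so they can be chained through. Returns 1 on success, 0 if
 * the lock_fns table could not be allocated. */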
int _XCBInitDisplayLock(Display *dpy)
{
    if(!dpy->lock_fns && !(dpy->lock_fns = Xcalloc(1, sizeof(*dpy->lock_fns))))
        return 0;
    dpy->xcb->lock_fns.lock_display = dpy->lock_fns->lock_display;
    dpy->lock_fns->lock_display = _XCBLockDisplay;
    dpy->xcb->lock_fns.unlock_display = dpy->lock_fns->unlock_display;
    dpy->lock_fns->unlock_display = _XCBUnlockDisplay;
    return 1;
}
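
/* Undo _XCBInitDisplayLock: release the lock_fns table. */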
void _XCBShutdownDisplayLock(Display *dpy)
{
    if(dpy->lock_fns) {
        Xfree((char *)dpy->lock_fns);
        dpy->lock_fns = NULL;
    }
}
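
/* Bring Xlib's sequence bookkeeping up to date with XCB: dpy->request
 * must count every request XCB has sent plus any partial request Xlib
 * is still assembling. Pointing last_req at a static dummy keeps Xlib
 * from trying to append to a request that XCB may already have sent. */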
void _XGetXCBBuffer(Display *dpy)
{
    static const xReq dummy_request;
    unsigned int xcb_req = xcb_get_request_sent(dpy->xcb->connection);

    if(xcb_connection_has_error(dpy->xcb->connection))
        _XIOError(dpy);

    /* if Xlib has a partial request pending then XCB doesn't know about
     * the current request yet */
    if(dpy->xcb->partial_request)
        ++xcb_req;

    assert(XCB_SEQUENCE_COMPARE(xcb_req, >=, dpy->request));
    dpy->request = xcb_req;
    dpy->last_req = (char *) &dummy_request;
}
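
/* Return the total length in bytes of the request starting at vec[0],
 * decoded from its 16-bit core length field or, for BIG-REQUESTS, from
 * the 32-bit extended length field. */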
static size_t request_length(struct iovec *vec)
{
    /* we have at least part of a request. dig out the length field.
     * note that length fields are always in vec[0]: Xlib doesn't split
     * fixed-length request parts. */
    size_t len;
    assert(vec[0].iov_len >= 4);
    len = ((uint16_t *) vec[0].iov_base)[1];
    if(len == 0)
    {
        /* a core length of 0 marks a BIG-REQUESTS extended request:
         * the real length is the 32-bit field that follows. */
        assert(vec[0].iov_len >= 8);
        len = ((uint32_t *) vec[0].iov_base)[1];
    }
    /* lengths are in 4-byte units; convert to bytes. */
    return len << 2;
}
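
/* If the iovecs at vec hold at least one complete request, hand exactly
 * one request to xcb_send_request, leave the iovecs describing only the
 * unsent remainder, and return 1; return 0 once no complete request
 * remains. */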
static inline int issue_complete_request(Display *dpy, int veclen, struct iovec *vec)
{
    xcb_protocol_request_t xcb_req = { 0 };
    unsigned int sequence;
    int flags = XCB_REQUEST_RAW;
    int i;
    size_t len;

    /* skip empty iovecs. if no iovecs remain, we're done. */
    assert(veclen >= 0);
    while(veclen > 0 && vec[0].iov_len == 0)
        --veclen, ++vec;
    if(!veclen)
        return 0;

    len = request_length(vec);

    /* do we have enough data for a complete request? how many iovec
     * elements does it span? */
    for(i = 0; i < veclen; ++i)
    {
        size_t oldlen = len;
        len -= vec[i].iov_len;
        /* len is unsigned, so "0 or wrapped below zero" means the
         * request ends within vec[i]; either way (len - 1) exceeds
         * oldlen, and we have enough data. */
        if((len - 1) > oldlen)
            break;
    }
    if(i == veclen)
        return 0;

    /* we have enough data to issue one complete request. the remaining
     * code can't fail. */

    /* len now holds how far we overshot, as a wrapped-around "negative"
     * value (or 0 if the request ends exactly at an iovec boundary);
     * adding it trims vec[i] to end precisely at the request boundary. */
    vec[i].iov_len += len;
    xcb_req.count = i + 1;
    xcb_req.opcode = ((uint8_t *) vec[0].iov_base)[0];

    /* if we don't own the event queue, we have to ask XCB to set our
     * errors aside for us. */
    if(dpy->xcb->event_owner != XlibOwnsEventQueue)
        flags |= XCB_REQUEST_CHECKED;

    /* XCB will always skip request 0; account for that in the Xlib count */
    if(xcb_get_request_sent(dpy->xcb->connection) == 0xffffffff)
        dpy->request++;

    /* send the accumulated request. */
    sequence = xcb_send_request(dpy->xcb->connection, flags, vec, &xcb_req);
    if(!sequence)
        _XIOError(dpy);

    /* xcb_send_request consumes the iovecs in place; restore the
     * wrapped-around remainder so vec[i] refers only to the data not
     * yet sent. */
    vec[i].iov_len = -len;

    /* iff we asked XCB to set aside errors, we must pick those up
     * eventually. iff there are async handlers, we may have just
     * issued requests that will generate replies. in either case,
     * we need to remember to check later. */
    if(flags & XCB_REQUEST_CHECKED || dpy->async_handlers)
    {
        PendingRequest *req = malloc(sizeof(PendingRequest));
        assert(req);
        req->next = 0;
        req->waiters = -1;
        req->sequence = sequence;
        *dpy->xcb->pending_requests_tail = req;
        dpy->xcb->pending_requests_tail = &req->next;
    }
    return 1;
}
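
/* Flush Xlib's output buffer through XCB: send every complete request
 * via issue_complete_request(), then stash any trailing partial request
 * in dpy->xcb->partial_request for a later flush to finish. */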
void _XPutXCBBuffer(Display *dpy)
{
    static char const pad[3];
    const int padsize = -dpy->xcb->request_extra_size & 3;
    xcb_connection_t *c = dpy->xcb->connection;
    _XExtension *ext;
    struct iovec iov[6];

    assert_sequence_less(dpy->last_request_read, dpy->request);
    assert_sequence_less(xcb_get_request_sent(c), dpy->request);

    for(ext = dpy->flushes; ext; ext = ext->next_flush)
    {
        ext->before_flush(dpy, &ext->codes, dpy->buffer, dpy->bufptr - dpy->buffer);
        if(dpy->xcb->request_extra)
        {
            ext->before_flush(dpy, &ext->codes, dpy->xcb->request_extra, dpy->xcb->request_extra_size);
            if(padsize)
                ext->before_flush(dpy, &ext->codes, pad, padsize);
        }
    }
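
    /* iov[0] and iov[1] are deliberately left unused: xcb_send_request
     * requires two spare iovec slots in front of the data for internal
     * use (see xcbext.h). iov[2]..iov[5] cover, in order, the pending
     * partial request, Xlib's output buffer, and any extra request data
     * plus its padding. */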
    iov[2].iov_base = dpy->xcb->partial_request;
    iov[2].iov_len = dpy->xcb->partial_request_offset;
    iov[3].iov_base = dpy->buffer;
    iov[3].iov_len = dpy->bufptr - dpy->buffer;
    iov[4].iov_base = (caddr_t) dpy->xcb->request_extra;
    iov[4].iov_len = dpy->xcb->request_extra_size;
    iov[5].iov_base = (caddr_t) pad;
    iov[5].iov_len = padsize;
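
    /* hand XCB every complete request currently buffered. */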
    while(issue_complete_request(dpy, 4, iov + 2))
        /* empty */;

    /* discard the partial_request if it has now been sent completely. */
    if(iov[2].iov_len == 0 && dpy->xcb->partial_request)
    {
        free(dpy->xcb->partial_request);
        dpy->xcb->partial_request = 0;
        dpy->xcb->partial_request_offset = 0;
    }

    /* is there anything left to copy into partial_request? */
    if(iov[3].iov_len != 0 || iov[4].iov_len != 0 || iov[5].iov_len != 0)
    {
        int i;
        if(!dpy->xcb->partial_request)
        {
            size_t len = request_length(iov + 3);
            assert(!dpy->xcb->partial_request_offset);
            dpy->xcb->partial_request = malloc(len);
            assert(dpy->xcb->partial_request);
        }
        for(i = 3; i < sizeof(iov) / sizeof(*iov); ++i)
        {
            memcpy(dpy->xcb->partial_request + dpy->xcb->partial_request_offset, iov[i].iov_base, iov[i].iov_len);
            dpy->xcb->partial_request_offset += iov[i].iov_len;
        }
    }

    dpy->xcb->request_extra = 0;
    dpy->xcb->request_extra_size = 0;
    dpy->bufptr = dpy->buffer;
}