Annotation of src/external/mpl/bind/dist/lib/isc/netmgr/udp.c, Revision 1.6
1.6 ! rillig 1: /* $NetBSD: udp.c,v 1.5 2021/03/23 20:59:03 christos Exp $ */
1.1 christos 2:
3: /*
4: * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
5: *
6: * This Source Code Form is subject to the terms of the Mozilla Public
7: * License, v. 2.0. If a copy of the MPL was not distributed with this
1.4 christos 8: * file, you can obtain one at https://mozilla.org/MPL/2.0/.
1.1 christos 9: *
10: * See the COPYRIGHT file distributed with this work for additional
11: * information regarding copyright ownership.
12: */
13:
14: #include <unistd.h>
15: #include <uv.h>
16:
17: #include <isc/atomic.h>
18: #include <isc/buffer.h>
19: #include <isc/condition.h>
1.4 christos 20: #include <isc/errno.h>
1.1 christos 21: #include <isc/magic.h>
22: #include <isc/mem.h>
23: #include <isc/netmgr.h>
24: #include <isc/random.h>
25: #include <isc/refcount.h>
26: #include <isc/region.h>
27: #include <isc/result.h>
28: #include <isc/sockaddr.h>
29: #include <isc/thread.h>
30: #include <isc/util.h>
31:
32: #include "netmgr-int.h"
33: #include "uv-compat.h"
34:
35: static isc_result_t
36: udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
37: isc_sockaddr_t *peer);
38:
39: static void
40: udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
41: const struct sockaddr *addr, unsigned flags);
42:
43: static void
44: udp_send_cb(uv_udp_send_t *req, int status);
45:
1.4 christos 46: static void
47: udp_close_cb(uv_handle_t *handle);
48:
49: static void
50: timer_close_cb(uv_handle_t *handle);
51:
52: static void
53: udp_close_direct(isc_nmsocket_t *sock);
54:
55: static void
56: failed_read_cb(isc_nmsocket_t *sock, isc_result_t result);
57:
58: static void
59: failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
60: isc_result_t eresult);
61:
62: static void
63: stop_udp_parent(isc_nmsocket_t *sock);
64: static void
65: stop_udp_child(isc_nmsocket_t *sock);
66:
67: static void
68: start_reading(isc_nmsocket_t *sock);
69: static void
70: stop_reading(isc_nmsocket_t *sock);
71:
72: static isc__nm_uvreq_t *
73: get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr);
74:
75: static bool
76: inactive(isc_nmsocket_t *sock) {
77: return (!isc__nmsocket_active(sock) ||
78: atomic_load(&sock->mgr->closing) ||
79: (sock->server != NULL && !isc__nmsocket_active(sock->server)));
80: }
81:
/*
 * Create a UDP socket for the given address family and prepare it for
 * listening: request incoming-CPU affinity and don't-fragment behavior
 * (both best-effort) and enable address reuse.  On platforms with
 * SO_REUSEPORT_LB, also enable load-balanced port reuse so multiple
 * sockets can be bound to the same address.  Aborts on hard failure.
 */
static uv_os_sock_t
isc__nm_udp_lb_socket(sa_family_t sa_family) {
	isc_result_t result;
	uv_os_sock_t sock;

	result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &sock);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);

	/* Best-effort tuning; failures here are deliberately ignored. */
	(void)isc__nm_socket_incoming_cpu(sock);
	(void)isc__nm_socket_dontfrag(sock, sa_family);

	result = isc__nm_socket_reuse(sock);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);

#if HAVE_SO_REUSEPORT_LB
	result = isc__nm_socket_reuse_lb(sock);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);
#endif

	return (sock);
}
103:
/*
 * Start listening for UDP on 'iface': create a listener socket with one
 * child socket per worker thread, enqueue a 'udplisten' event for each
 * child on its worker, then block until every child has reported back.
 *
 * Returns ISC_R_SUCCESS and sets '*sockp' on success; on failure the
 * partially set-up listener is stopped and closed and the first child's
 * error is returned.
 */
isc_result_t
isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
		 void *cbarg, size_t extrahandlesize, isc_nmsocket_t **sockp) {
	isc_result_t result = ISC_R_SUCCESS;
	isc_nmsocket_t *sock = NULL;
	sa_family_t sa_family = iface->addr.type.sa.sa_family;
	size_t children_size = 0;
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
	uv_os_sock_t fd = -1;
#endif

	REQUIRE(VALID_NM(mgr));

	/*
	 * We are creating mgr->nworkers duplicated sockets, one
	 * socket for each worker thread.
	 */
	sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t));
	isc__nmsocket_init(sock, mgr, isc_nm_udplistener, iface);

	sock->rchildren = 0;
#if defined(WIN32)
	sock->nchildren = 1;
#else
	sock->nchildren = mgr->nworkers;
#endif

	children_size = sock->nchildren * sizeof(sock->children[0]);
	sock->children = isc_mem_get(mgr->mctx, children_size);
	memset(sock->children, 0, children_size);

	sock->recv_cb = cb;
	sock->recv_cbarg = cbarg;
	sock->extrahandlesize = extrahandlesize;
	/* ISC_R_DEFAULT marks "no child has reported a result yet". */
	sock->result = ISC_R_DEFAULT;
	sock->tid = isc_random_uniform(sock->nchildren);
	sock->fd = -1;

#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
	/* Without SO_REUSEPORT_LB the children share dup()s of one fd. */
	fd = isc__nm_udp_lb_socket(sa_family);
#endif

	for (size_t i = 0; i < sock->nchildren; i++) {
		isc__netievent_udplisten_t *ievent = NULL;
		isc_nmsocket_t *csock = &sock->children[i];

		isc__nmsocket_init(csock, mgr, isc_nm_udpsocket, iface);
		csock->parent = sock;
		csock->iface = sock->iface;
		csock->reading = true;
		csock->recv_cb = cb;
		csock->recv_cbarg = cbarg;
		csock->extrahandlesize = sock->extrahandlesize;
		csock->tid = i;

#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
		csock->fd = isc__nm_udp_lb_socket(sa_family);
#else
		csock->fd = dup(fd);
#endif
		REQUIRE(csock->fd >= 0);

		/* The child finishes setup on its own worker thread. */
		ievent = isc__nm_get_netievent_udplisten(mgr, csock);
		isc__nm_enqueue_ievent(&mgr->workers[i],
				       (isc__netievent_t *)ievent);
	}

#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
	/* The template fd was duplicated into each child; close it. */
	isc__nm_closesocket(fd);
#endif

	/* Wait until every child has run isc__nm_async_udplisten(). */
	LOCK(&sock->lock);
	while (sock->rchildren != sock->nchildren) {
		WAIT(&sock->cond, &sock->lock);
	}
	result = sock->result;
	atomic_store(&sock->active, true);
	/* Release any children blocked waiting for us to become active. */
	BROADCAST(&sock->scond);
	UNLOCK(&sock->lock);
	INSIST(result != ISC_R_DEFAULT);

	if (result == ISC_R_SUCCESS) {
		REQUIRE(sock->rchildren == sock->nchildren);
		*sockp = sock;
	} else {
		/* Tear down whatever was set up before the failure. */
		atomic_store(&sock->active, false);
		isc__nm_udp_stoplistening(sock);
		isc_nmsocket_close(&sock);
	}

	return (result);
}
196:
197: /*%<
198: * Allocator for UDP recv operations. Limited to size 20 * (2^16 + 2),
199: * which allows enough space for recvmmsg() to get multiple messages at
200: * a time.
201: *
202: * Note this doesn't actually allocate anything, it just assigns the
203: * worker's receive buffer to a socket, and marks it as "in use".
204: */
205: static void
206: udp_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
207: isc_nmsocket_t *sock = uv_handle_get_data(handle);
208: isc__networker_t *worker = NULL;
209:
210: REQUIRE(VALID_NMSOCK(sock));
211: REQUIRE(sock->type == isc_nm_udpsocket);
212: REQUIRE(isc__nm_in_netthread());
213: REQUIRE(size <= ISC_NETMGR_RECVBUF_SIZE);
214:
215: worker = &sock->mgr->workers[sock->tid];
216: INSIST(!worker->recvbuf_inuse);
217:
218: buf->base = worker->recvbuf;
219: buf->len = ISC_NETMGR_RECVBUF_SIZE;
220: worker->recvbuf_inuse = true;
1.1 christos 221: }
222:
/*
 * Asynchronous 'udplisten' call handler: start listening on a UDP socket.
 *
 * Runs on the child socket's own worker thread: registers the child's
 * fd with libuv, binds it (or inherits the parent's bind state where
 * SO_REUSEPORT_LB is unavailable), starts receiving, and reports the
 * outcome to the parent under the parent's lock so that
 * isc_nm_listenudp() can collect results from all children.
 */
void
isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_udplisten_t *ievent = (isc__netievent_udplisten_t *)ev0;
	isc_nmiface_t *iface = NULL;
	isc_nmsocket_t *sock = NULL;
	int r, uv_bind_flags = 0;
	int uv_init_flags = 0;
	sa_family_t sa_family;
	isc_result_t result = ISC_R_DEFAULT;

	REQUIRE(VALID_NMSOCK(ievent->sock));
	REQUIRE(ievent->sock->tid == isc_nm_tid());
	REQUIRE(VALID_NMSOCK(ievent->sock->parent));

	sock = ievent->sock;
	iface = sock->iface;
	sa_family = iface->addr.type.sa.sa_family;

	REQUIRE(sock->type == isc_nm_udpsocket);
	REQUIRE(sock->iface != NULL);
	REQUIRE(sock->parent != NULL);
	REQUIRE(sock->tid == isc_nm_tid());

#ifdef UV_UDP_RECVMMSG
	/* Let libuv batch datagram reception where supported. */
	uv_init_flags |= UV_UDP_RECVMMSG;
#endif
	r = uv_udp_init_ex(&worker->loop, &sock->uv_handle.udp, uv_init_flags);
	RUNTIME_CHECK(r == 0);
	uv_handle_set_data(&sock->uv_handle.handle, sock);
	/* This keeps the socket alive after everything else is gone */
	isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });

	r = uv_timer_init(&worker->loop, &sock->timer);
	RUNTIME_CHECK(r == 0);
	uv_handle_set_data((uv_handle_t *)&sock->timer, sock);

	LOCK(&sock->parent->lock);

	r = uv_udp_open(&sock->uv_handle.udp, sock->fd);
	if (r < 0) {
		isc__nm_closesocket(sock->fd);
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPENFAIL]);
		goto done;
	}
	isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPEN]);

	if (sa_family == AF_INET6) {
		uv_bind_flags |= UV_UDP_IPV6ONLY;
	}

#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
	/* Every child has its own socket; each one gets bound. */
	r = isc_uv_udp_freebind(&sock->uv_handle.udp,
				&sock->parent->iface->addr.type.sa,
				uv_bind_flags);
	if (r < 0) {
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
		goto done;
	}
#else
	if (sock->parent->fd == -1) {
		/* This thread is first, bind the socket */
		r = isc_uv_udp_freebind(&sock->uv_handle.udp,
					&sock->parent->iface->addr.type.sa,
					uv_bind_flags);
		if (r < 0) {
			isc__nm_incstats(sock->mgr,
					 sock->statsindex[STATID_BINDFAIL]);
			goto done;
		}
		sock->parent->uv_handle.udp.flags = sock->uv_handle.udp.flags;
		sock->parent->fd = sock->fd;
	} else {
		/* The socket is already bound, just copy the flags */
		sock->uv_handle.udp.flags = sock->parent->uv_handle.udp.flags;
	}
#endif

#ifdef ISC_RECV_BUFFER_SIZE
	uv_recv_buffer_size(&sock->uv_handle.handle,
			    &(int){ ISC_RECV_BUFFER_SIZE });
#endif
#ifdef ISC_SEND_BUFFER_SIZE
	uv_send_buffer_size(&sock->uv_handle.handle,
			    &(int){ ISC_SEND_BUFFER_SIZE });
#endif
	r = uv_udp_recv_start(&sock->uv_handle.udp, udp_alloc_cb, udp_recv_cb);
	if (r != 0) {
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
		goto done;
	}

	atomic_store(&sock->listening, true);

done:
	/* Report our result to the parent and wake isc_nm_listenudp(). */
	result = isc__nm_uverr2result(r);
	sock->parent->rchildren += 1;
	if (sock->parent->result == ISC_R_DEFAULT) {
		/* The first child to report sets the parent's result. */
		sock->parent->result = result;
	}
	SIGNAL(&sock->parent->cond);
	if (!atomic_load(&sock->parent->active)) {
		/* Block until the parent has been marked active. */
		WAIT(&sock->parent->scond, &sock->parent->lock);
	}
	INSIST(atomic_load(&sock->parent->active));
	UNLOCK(&sock->parent->lock);
}
332:
333: static void
1.4 christos 334: enqueue_stoplistening(isc_nmsocket_t *sock) {
335: isc__netievent_udpstop_t *ievent =
336: isc__nm_get_netievent_udpstop(sock->mgr, sock);
337: isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
338: (isc__netievent_t *)ievent);
1.1 christos 339: }
340:
341: void
342: isc__nm_udp_stoplistening(isc_nmsocket_t *sock) {
343: REQUIRE(VALID_NMSOCK(sock));
344: REQUIRE(sock->type == isc_nm_udplistener);
345:
1.4 christos 346: if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
347: true)) {
348: INSIST(0);
349: ISC_UNREACHABLE();
1.3 christos 350: }
351:
1.4 christos 352: enqueue_stoplistening(sock);
1.1 christos 353: }
354:
/*
 * Asynchronous 'udpstop' call handler: stop listening on a UDP socket.
 *
 * For a child socket this stops just that child.  For the parent
 * (listener) socket it requires the netmgr interlock; if the manager
 * is paused the event is re-enqueued to be retried later.
 */
void
isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_udpstop_t *ievent = (isc__netievent_udpstop_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;

	UNUSED(worker);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());

	if (sock->parent != NULL) {
		/* A child socket: stop only this one. */
		stop_udp_child(sock);
		return;
	}

	/*
	 * If network manager is paused, re-enqueue the event for later.
	 */
	if (!isc__nm_acquire_interlocked(sock->mgr)) {
		enqueue_stoplistening(sock);
	} else {
		stop_udp_parent(sock);
		isc__nm_drop_interlocked(sock->mgr);
	}
}
383:
/*
 * udp_recv_cb handles incoming UDP packet from uv. The buffer here is
 * reused for a series of packets, so we need to allocate a new one.
 * This new one can be reused to send the response then.
 *
 * Runs on the socket's worker thread; on success the read callback is
 * invoked synchronously with the received datagram.
 */
static void
udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
	    const struct sockaddr *addr, unsigned flags) {
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)handle);
	isc__nm_uvreq_t *req = NULL;
	uint32_t maxudp;
	bool free_buf;
	isc_sockaddr_t sockaddr;
	isc_result_t result;

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(sock->reading);

	/*
	 * With recvmmsg support the worker's buffer is shared between
	 * several chunks and may only be released when libuv says so.
	 */
#ifdef UV_UDP_MMSG_FREE
	free_buf = ((flags & UV_UDP_MMSG_FREE) == UV_UDP_MMSG_FREE);
#elif UV_UDP_MMSG_CHUNK
	free_buf = ((flags & UV_UDP_MMSG_CHUNK) == 0);
#else
	free_buf = true;
	UNUSED(flags);
#endif

	/*
	 * Three possible reasons to return now without processing:
	 */

	/*
	 * - If we're simulating a firewall blocking UDP packets
	 *   bigger than 'maxudp' bytes for testing purposes.
	 */
	maxudp = atomic_load(&sock->mgr->maxudp);
	if ((maxudp != 0 && (uint32_t)nrecv > maxudp)) {
		/*
		 * We need to keep the read_cb intact in case, so the
		 * readtimeout_cb can trigger and not crash because of
		 * missing read_req.
		 */
		goto free;
	}

	/*
	 * - If addr == NULL, in which case it's the end of stream;
	 *   we can free the buffer and bail.
	 */
	if (addr == NULL) {
		failed_read_cb(sock, ISC_R_EOF);
		goto free;
	}

	/*
	 * - If the socket is no longer active.
	 */
	if (!isc__nmsocket_active(sock)) {
		failed_read_cb(sock, ISC_R_CANCELED);
		goto free;
	}

	/* A negative count is a libuv receive error. */
	if (nrecv < 0) {
		failed_read_cb(sock, isc__nm_uverr2result(nrecv));
		goto free;
	}

	result = isc_sockaddr_fromsockaddr(&sockaddr, addr);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);

	req = get_read_req(sock, &sockaddr);

	/*
	 * The callback will be called synchronously, because result is
	 * ISC_R_SUCCESS, so we are ok of passing the buf directly.
	 */
	req->uvbuf.base = buf->base;
	req->uvbuf.len = nrecv;

	sock->recv_read = false;

	/* Guard against re-entrant processing of the same socket. */
	REQUIRE(!sock->processing);
	sock->processing = true;
	isc__nm_readcb(sock, req, ISC_R_SUCCESS);
	sock->processing = false;

free:
	if (free_buf) {
		isc__nm_free_uvbuf(sock, buf);
	}
}
476:
/*
 * Send the data in 'region' to a peer via a UDP socket. We try to find
 * a proper sibling/child socket so that we won't have to jump to
 * another thread.
 *
 * 'cb' is invoked with the send result; the caller's handle is held by
 * the request until the callback runs.
 */
void
isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
		 void *cbarg) {
	isc_nmsocket_t *sock = handle->sock;
	isc_nmsocket_t *psock = NULL, *rsock = sock;
	isc_sockaddr_t *peer = &handle->peer;
	isc__nm_uvreq_t *uvreq = NULL;
	uint32_t maxudp = atomic_load(&sock->mgr->maxudp);
	int ntid;

	/* Wrap the caller's buffer and callback in a uv request. */
	uvreq = isc__nm_uvreq_get(sock->mgr, sock);
	uvreq->uvbuf.base = (char *)region->base;
	uvreq->uvbuf.len = region->length;

	isc_nmhandle_attach(handle, &uvreq->handle);

	uvreq->cb.send = cb;
	uvreq->cbarg = cbarg;

	/*
	 * We're simulating a firewall blocking UDP packets bigger than
	 * 'maxudp' bytes, for testing purposes.
	 *
	 * The client would ordinarily have unreferenced the handle
	 * in the callback, but that won't happen in this case, so
	 * we need to do so here.
	 */
	if (maxudp != 0 && region->length > maxudp) {
		isc__nm_uvreq_put(&uvreq, sock);
		isc_nmhandle_detach(&handle); /* FIXME? */
		return;
	}

	/* Find the listener (parent) socket, if any. */
	if (sock->type == isc_nm_udpsocket && !atomic_load(&sock->client)) {
		INSIST(sock->parent != NULL);
		psock = sock->parent;
	} else if (sock->type == isc_nm_udplistener) {
		psock = sock;
	} else if (!atomic_load(&sock->client)) {
		INSIST(0);
		ISC_UNREACHABLE();
	}

	/*
	 * If we're in the network thread, we can send directly. If the
	 * handle is associated with a UDP socket, we can reuse its
	 * thread (assuming CPU affinity). Otherwise, pick a thread at
	 * random.
	 */
	if (isc__nm_in_netthread()) {
		ntid = isc_nm_tid();
	} else if (sock->type == isc_nm_udpsocket &&
		   !atomic_load(&sock->client)) {
		ntid = sock->tid;
	} else {
		ntid = (int)isc_random_uniform(sock->nchildren);
	}

	if (psock != NULL) {
		/* Send on the child socket owned by the chosen thread. */
		rsock = &psock->children[ntid];
	}

	if (isc_nm_tid() == rsock->tid) {
		/* Already on the right thread: send synchronously. */
		isc__netievent_udpsend_t ievent
			= { .sock = rsock, .req = uvreq, .peer = *peer };

		isc__nm_async_udpsend(NULL, (isc__netievent_t *)&ievent);
	} else {
		isc__netievent_udpsend_t *ievent =
			isc__nm_get_netievent_udpsend(sock->mgr, rsock);
		ievent->peer = *peer;
		ievent->req = uvreq;

		isc__nm_enqueue_ievent(&sock->mgr->workers[rsock->tid],
				       (isc__netievent_t *)ievent);
	}
}
559:
560: /*
1.4 christos 561: * Asynchronous 'udpsend' event handler: send a packet on a UDP socket.
1.1 christos 562: */
563: void
564: isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0) {
1.4 christos 565: isc_result_t result;
1.1 christos 566: isc__netievent_udpsend_t *ievent = (isc__netievent_udpsend_t *)ev0;
1.4 christos 567: isc_nmsocket_t *sock = ievent->sock;
568: isc__nm_uvreq_t *uvreq = ievent->req;
569:
570: REQUIRE(sock->type == isc_nm_udpsocket);
571: REQUIRE(sock->tid == isc_nm_tid());
572: UNUSED(worker);
1.1 christos 573:
1.4 christos 574: if (inactive(sock)) {
575: failed_send_cb(sock, uvreq, ISC_R_CANCELED);
576: return;
577: }
1.1 christos 578:
1.4 christos 579: result = udp_send_direct(sock, uvreq, &ievent->peer);
580: if (result != ISC_R_SUCCESS) {
581: isc__nm_incstats(sock->mgr, sock->statsindex[STATID_SENDFAIL]);
582: failed_send_cb(sock, uvreq, result);
1.1 christos 583: }
584: }
585:
586: static void
587: udp_send_cb(uv_udp_send_t *req, int status) {
588: isc_result_t result = ISC_R_SUCCESS;
1.4 christos 589: isc__nm_uvreq_t *uvreq = uv_handle_get_data((uv_handle_t *)req);
590: isc_nmsocket_t *sock = uvreq->sock;
1.1 christos 591:
592: REQUIRE(VALID_UVREQ(uvreq));
593: REQUIRE(VALID_NMHANDLE(uvreq->handle));
594:
595: if (status < 0) {
596: result = isc__nm_uverr2result(status);
1.4 christos 597: isc__nm_incstats(sock->mgr, sock->statsindex[STATID_SENDFAIL]);
1.1 christos 598: }
599:
1.4 christos 600: isc__nm_sendcb(sock, uvreq, result);
1.1 christos 601: }
602:
/*
 * udp_send_direct sends buf to a peer on a socket. Sock has to be in
 * the same thread as the callee.
 *
 * Returns ISC_R_CANCELED when the socket is no longer usable,
 * otherwise the translated uv_udp_send() status; on ISC_R_SUCCESS the
 * completion is later reported via udp_send_cb().
 */
static isc_result_t
udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
		isc_sockaddr_t *peer) {
	const struct sockaddr *sa = &peer->type.sa;
	int r;

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(sock->type == isc_nm_udpsocket);

	if (inactive(sock)) {
		return (ISC_R_CANCELED);
	}

#ifdef HAVE_UV_UDP_CONNECT
	/*
	 * If we used uv_udp_connect() (and not the shim version for
	 * older versions of libuv), then the peer address has to be
	 * set to NULL or else uv_udp_send() could fail or assert,
	 * depending on the libuv version.
	 */
	if (atomic_load(&sock->connected)) {
		sa = NULL;
	}
#endif

	r = uv_udp_send(&req->uv_req.udp_send, &sock->uv_handle.udp,
			&req->uvbuf, 1, sa, udp_send_cb);
	if (r < 0) {
		return (isc__nm_uverr2result(r));
	}

	return (ISC_R_SUCCESS);
}
1.4 christos 642:
/*
 * Open, bind, and connect a client UDP socket on the current worker
 * thread.  The result is published under sock->lock and the thread
 * waiting in isc_nm_udpconnect() is signalled before returning.
 */
static isc_result_t
udp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
	isc__networker_t *worker = NULL;
	int uv_bind_flags = UV_UDP_REUSEADDR;
	isc_result_t result = ISC_R_DEFAULT;
	int r;

	REQUIRE(isc__nm_in_netthread());
	REQUIRE(sock->tid == isc_nm_tid());

	worker = &sock->mgr->workers[isc_nm_tid()];

	atomic_store(&sock->connecting, true);

	r = uv_udp_init(&worker->loop, &sock->uv_handle.udp);
	RUNTIME_CHECK(r == 0);
	uv_handle_set_data(&sock->uv_handle.handle, sock);

	r = uv_timer_init(&worker->loop, &sock->timer);
	RUNTIME_CHECK(r == 0);
	uv_handle_set_data((uv_handle_t *)&sock->timer, sock);

	/* Hand the pre-created fd (from isc_nm_udpconnect) to libuv. */
	r = uv_udp_open(&sock->uv_handle.udp, sock->fd);
	if (r != 0) {
		isc__nm_closesocket(sock->fd);
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPENFAIL]);
		goto done;
	}
	isc__nm_incstats(sock->mgr, sock->statsindex[STATID_OPEN]);

	if (sock->iface->addr.type.sa.sa_family == AF_INET6) {
		uv_bind_flags |= UV_UDP_IPV6ONLY;
	}

	r = uv_udp_bind(&sock->uv_handle.udp, &sock->iface->addr.type.sa,
			uv_bind_flags);
	if (r != 0) {
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
		goto done;
	}

#ifdef ISC_RECV_BUFFER_SIZE
	uv_recv_buffer_size(&sock->uv_handle.handle,
			    &(int){ ISC_RECV_BUFFER_SIZE });
#endif
#ifdef ISC_SEND_BUFFER_SIZE
	uv_send_buffer_size(&sock->uv_handle.handle,
			    &(int){ ISC_SEND_BUFFER_SIZE });
#endif

	r = isc_uv_udp_connect(&sock->uv_handle.udp, &req->peer.type.sa);
	if (r != 0) {
		isc__nm_incstats(sock->mgr,
				 sock->statsindex[STATID_CONNECTFAIL]);
		goto done;
	}
	isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]);

	atomic_store(&sock->connecting, false);
	atomic_store(&sock->connected, true);

done:
	result = isc__nm_uverr2result(r);

	/*
	 * Publish the result, wake the waiter in isc_nm_udpconnect(),
	 * then block until it marks the socket active.
	 */
	LOCK(&sock->lock);
	sock->result = result;
	SIGNAL(&sock->cond);
	if (!atomic_load(&sock->active)) {
		WAIT(&sock->scond, &sock->lock);
	}
	INSIST(atomic_load(&sock->active));
	UNLOCK(&sock->lock);

	return (result);
}
718:
/*
 * Asynchronous 'udpconnect' call handler: open a new UDP socket and
 * call the 'open' callback with a handle.
 *
 * On failure the socket is deactivated and closed and the request is
 * released; udp_connect_direct() has already published the result to
 * the waiting isc_nm_udpconnect().
 */
void
isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_udpconnect_t *ievent =
		(isc__netievent_udpconnect_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;
	isc__nm_uvreq_t *req = ievent->req;
	isc_result_t result;

	UNUSED(worker);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->type == isc_nm_udpsocket);
	REQUIRE(sock->iface != NULL);
	REQUIRE(sock->parent == NULL);
	REQUIRE(sock->tid == isc_nm_tid());

	req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface->addr);
	result = udp_connect_direct(sock, req);
	if (result != ISC_R_SUCCESS) {
		atomic_store(&sock->active, false);
		isc__nm_udp_close(sock);
		isc__nm_uvreq_put(&req, sock);
	} else {
		/*
		 * The callback has to be called after the socket has been
		 * initialized
		 */
		isc__nm_connectcb(sock, req, ISC_R_SUCCESS);
	}

	/*
	 * The sock is now attached to the handle.
	 */
	isc__nmsocket_detach(&sock);
}
758:
/*
 * Create a connected client UDP socket from 'local' to 'peer' and call
 * 'cb' with a handle once the connection has been set up.  The setup
 * itself runs on a worker thread (via the 'udpconnect' event); this
 * function blocks until that worker has reported a result, which is
 * then returned.
 *
 * 'timeout' is the read timeout for the new socket; 'extrahandlesize'
 * is extra space reserved in handles created for it.
 */
isc_result_t
isc_nm_udpconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
		  isc_nm_cb_t cb, void *cbarg, unsigned int timeout,
		  size_t extrahandlesize) {
	isc_result_t result = ISC_R_SUCCESS;
	isc_nmsocket_t *sock = NULL;
	isc__netievent_udpconnect_t *event = NULL;
	isc__nm_uvreq_t *req = NULL;
	sa_family_t sa_family;
	uv_os_sock_t fd;

	REQUIRE(VALID_NM(mgr));
	REQUIRE(local != NULL);
	REQUIRE(peer != NULL);

	sa_family = peer->addr.type.sa.sa_family;

	/*
	 * The socket() call can fail spuriously on FreeBSD 12, so we
	 * need to handle the failure early and gracefully.
	 */
	result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &fd);
	if (result != ISC_R_SUCCESS) {
		return (result);
	}

	sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t));
	isc__nmsocket_init(sock, mgr, isc_nm_udpsocket, local);

	sock->connect_cb = cb;
	sock->connect_cbarg = cbarg;
	sock->read_timeout = timeout;
	sock->extrahandlesize = extrahandlesize;
	sock->peer = peer->addr;
	sock->fd = fd;
	/* ISC_R_DEFAULT marks "worker has not reported a result yet". */
	sock->result = ISC_R_DEFAULT;
	atomic_init(&sock->client, true);

	/* Address-reuse options may be unimplemented on some platforms. */
	result = isc__nm_socket_reuse(sock->fd);
	RUNTIME_CHECK(result == ISC_R_SUCCESS ||
		      result == ISC_R_NOTIMPLEMENTED);

	result = isc__nm_socket_reuse_lb(sock->fd);
	RUNTIME_CHECK(result == ISC_R_SUCCESS ||
		      result == ISC_R_NOTIMPLEMENTED);

	/* Best-effort tuning; failures are ignored. */
	(void)isc__nm_socket_incoming_cpu(sock->fd);

	(void)isc__nm_socket_dontfrag(sock->fd, sa_family);

	req = isc__nm_uvreq_get(mgr, sock);
	req->cb.connect = cb;
	req->cbarg = cbarg;
	req->peer = peer->addr;
	req->local = local->addr;

	event = isc__nm_get_netievent_udpconnect(mgr, sock, req);

	if (isc__nm_in_netthread()) {
		/* Already on a worker: run the connect synchronously. */
		atomic_store(&sock->active, true);
		sock->tid = isc_nm_tid();
		isc__nm_async_udpconnect(&mgr->workers[sock->tid],
					 (isc__netievent_t *)event);
		isc__nm_put_netievent_udpconnect(mgr, event);
	} else {
		/* Hand the connect off to a randomly chosen worker. */
		atomic_init(&sock->active, false);
		sock->tid = isc_random_uniform(mgr->nworkers);
		isc__nm_enqueue_ievent(&mgr->workers[sock->tid],
				       (isc__netievent_t *)event);
	}
	/* Wait for udp_connect_direct() to publish a result. */
	LOCK(&sock->lock);
	result = sock->result;
	while (sock->result == ISC_R_DEFAULT) {
		WAIT(&sock->cond, &sock->lock);
		result = sock->result;
	}
	atomic_store(&sock->active, true);
	/* Release the worker blocked in udp_connect_direct(). */
	BROADCAST(&sock->scond);
	UNLOCK(&sock->lock);
	ENSURE(result != ISC_R_DEFAULT);

	return (result);
}
842:
843: static void
844: udp_read_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
845: const struct sockaddr *addr, unsigned flags) {
846: isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)handle);
847: REQUIRE(VALID_NMSOCK(sock));
848:
849: udp_recv_cb(handle, nrecv, buf, addr, flags);
850: /*
851: * If a caller calls isc_nm_read() on a listening socket, we can
852: * get here, but we MUST NOT stop reading from the listener
853: * socket. The only difference between listener and connected
854: * sockets is that the former has sock->parent set and later
855: * does not.
856: */
857: if (!sock->parent) {
858: stop_reading(sock);
859: }
860: }
861:
/*
 * Handle a failed or terminated read on 'sock', reporting 'result' to
 * the pending read callback (if any).
 *
 * Client sockets stop reading, have their callbacks cleared, and are
 * prepared for destruction.  Server (listener child) sockets keep
 * reading and keep their callbacks; only stoplistening tears them
 * down.
 */
static void
failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(result != ISC_R_SUCCESS);

	if (atomic_load(&sock->client)) {
		stop_reading(sock);

		if (!sock->recv_read) {
			/* No read pending; go straight to teardown. */
			goto destroy;
		}
		sock->recv_read = false;

		if (sock->recv_cb != NULL) {
			isc__nm_uvreq_t *req = get_read_req(sock, NULL);
			isc__nmsocket_clearcb(sock);
			isc__nm_readcb(sock, req, result);
		}

	destroy:
		isc__nmsocket_prep_destroy(sock);
		return;
	}

	/*
	 * For UDP server socket, we don't have child socket via
	 * "accept", so we:
	 * - we continue to read
	 * - we don't clear the callbacks
	 * - we don't destroy it (only stoplistening could do that)
	 */
	if (!sock->recv_read) {
		return;
	}
	sock->recv_read = false;

	if (sock->recv_cb != NULL) {
		isc__nm_uvreq_t *req = get_read_req(sock, NULL);
		isc__nm_readcb(sock, req, result);
	}
}
903:
904: static void
905: failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
906: isc_result_t eresult) {
907: REQUIRE(VALID_NMSOCK(sock));
908: REQUIRE(VALID_UVREQ(req));
909:
910: if (req->cb.send != NULL) {
911: isc__nm_sendcb(sock, req, eresult);
912: } else {
913: isc__nm_uvreq_put(&req, sock);
914: }
915: }
916:
917: static isc__nm_uvreq_t *
918: get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr) {
919: isc__nm_uvreq_t *req = NULL;
920:
921: req = isc__nm_uvreq_get(sock->mgr, sock);
922: req->cb.recv = sock->recv_cb;
923: req->cbarg = sock->recv_cbarg;
924:
925: if (atomic_load(&sock->client)) {
926: isc_nmhandle_attach(sock->statichandle, &req->handle);
927: } else {
928: req->handle = isc__nmhandle_get(sock, sockaddr, NULL);
929: }
930:
931: return req;
932: }
933:
934: static void
935: readtimeout_cb(uv_timer_t *handle) {
936: isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)handle);
937:
938: REQUIRE(VALID_NMSOCK(sock));
939: REQUIRE(sock->tid == isc_nm_tid());
940: REQUIRE(sock->reading);
941:
942: /*
943: * Timeout; stop reading and process whatever we have.
944: */
945: failed_read_cb(sock, ISC_R_TIMEDOUT);
946: }
947:
/*
 * Asynchronous 'udpread' call handler: start or resume reading on a
 * socket; pause reading and call the 'recv' callback after each
 * datagram.
 */
void
isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_udpread_t *ievent = (isc__netievent_udpread_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;

	UNUSED(worker);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());

	if (inactive(sock)) {
		/*
		 * failed_read_cb() expects a read in progress; fake one
		 * so the cancellation is reported to the caller.
		 */
		sock->reading = true;
		failed_read_cb(sock, ISC_R_CANCELED);
		return;
	}

	start_reading(sock);
}
971:
972: static void
973: start_sock_timer(isc_nmsocket_t *sock) {
974: if (sock->read_timeout > 0) {
975: int r = uv_timer_start(&sock->timer, readtimeout_cb,
976: sock->read_timeout, 0);
977: REQUIRE(r == 0);
978: }
979: }
980:
981: static void
982: stop_sock_timer(isc_nmsocket_t *sock) {
983: int r = uv_timer_stop(&sock->timer);
984: REQUIRE(r == 0);
985: }
986:
987: static void
988: start_reading(isc_nmsocket_t *sock) {
989: if (sock->reading) {
990: return;
991: }
992:
993: int r = uv_udp_recv_start(&sock->uv_handle.udp, udp_alloc_cb,
994: udp_read_cb);
995: REQUIRE(r == 0);
996: sock->reading = true;
997:
998: start_sock_timer(sock);
999: }
1000:
1001: static void
1002: stop_reading(isc_nmsocket_t *sock) {
1003: if (!sock->reading) {
1004: return;
1005: }
1006:
1007: int r = uv_udp_recv_stop(&sock->uv_handle.udp);
1008: REQUIRE(r == 0);
1009: sock->reading = false;
1010:
1011: stop_sock_timer(sock);
1012: }
1013:
/*
 * Start a single-datagram read on a connected UDP handle: record the
 * receive callback, then either start reading immediately (when called
 * on the socket's own thread and not already reading) or enqueue an
 * asynchronous 'udpread' event for the owning worker.
 */
void
isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	isc_nmsocket_t *sock = handle->sock;

	REQUIRE(sock->type == isc_nm_udpsocket);
	REQUIRE(sock->statichandle == handle);
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(!sock->recv_read);

	sock->recv_cb = cb;
	sock->recv_cbarg = cbarg;
	sock->recv_read = true;

	if (!sock->reading && sock->tid == isc_nm_tid()) {
		/* Same thread and idle: handle the event synchronously. */
		isc__netievent_udpread_t ievent = { .sock = sock };
		isc__nm_async_udpread(NULL, (isc__netievent_t *)&ievent);
	} else {
		isc__netievent_udpread_t *ievent =
			isc__nm_get_netievent_udpread(sock->mgr, sock);
		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
				       (isc__netievent_t *)ievent);
	}
}
1040:
1041: static void
1042: udp_stop_cb(uv_handle_t *handle) {
1043: isc_nmsocket_t *sock = uv_handle_get_data(handle);
1044: uv_handle_set_data(handle, NULL);
1045:
1046: REQUIRE(VALID_NMSOCK(sock));
1047: REQUIRE(sock->tid == isc_nm_tid());
1048: REQUIRE(atomic_load(&sock->closing));
1049:
1050: if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
1051: true)) {
1052: INSIST(0);
1053: ISC_UNREACHABLE();
1054: }
1055:
1056: isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CLOSE]);
1057:
1058: atomic_store(&sock->listening, false);
1059:
1060: isc__nmsocket_detach(&sock);
1061: }
1062:
1063: static void
1064: udp_close_cb(uv_handle_t *handle) {
1065: isc_nmsocket_t *sock = uv_handle_get_data(handle);
1066: uv_handle_set_data(handle, NULL);
1067:
1068: REQUIRE(VALID_NMSOCK(sock));
1069: REQUIRE(sock->tid == isc_nm_tid());
1070: REQUIRE(atomic_load(&sock->closing));
1071:
1072: if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false },
1073: true)) {
1074: INSIST(0);
1075: ISC_UNREACHABLE();
1076: }
1077:
1078: isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CLOSE]);
1079:
1080: if (sock->server != NULL) {
1081: isc__nmsocket_detach(&sock->server);
1082: }
1083:
1084: atomic_store(&sock->connected, false);
1085: atomic_store(&sock->listening, false);
1086:
1087: isc__nmsocket_prep_destroy(sock);
1088: }
1089:
1090: static void
1091: timer_close_cb(uv_handle_t *handle) {
1092: isc_nmsocket_t *sock = uv_handle_get_data(handle);
1093: uv_handle_set_data(handle, NULL);
1094:
1095: if (sock->parent) {
1096: uv_close(&sock->uv_handle.handle, udp_stop_cb);
1097: } else {
1098: uv_close(&sock->uv_handle.handle, udp_close_cb);
1099: }
1100: }
1101:
1102: static void
1103: stop_udp_child(isc_nmsocket_t *sock) {
1104: REQUIRE(sock->type == isc_nm_udpsocket);
1105: REQUIRE(sock->tid == isc_nm_tid());
1106:
1107: if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
1108: true)) {
1109: return;
1110: }
1111:
1112: udp_close_direct(sock);
1113:
1114: LOCK(&sock->parent->lock);
1115: sock->parent->rchildren -= 1;
1116: UNLOCK(&sock->parent->lock);
1117: BROADCAST(&sock->parent->cond);
1118: }
1119:
/*
 * Stop a UDP listener: deactivate and stop every child socket —
 * inline when the child belongs to this thread, otherwise via a
 * 'udpstop' event queued to the child's worker — then block until
 * all children have reported back before destroying the listener.
 */
static void
stop_udp_parent(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->type == isc_nm_udplistener);

	for (size_t i = 0; i < sock->nchildren; i++) {
		isc__netievent_udpstop_t *ievent = NULL;
		isc_nmsocket_t *csock = &sock->children[i];
		REQUIRE(VALID_NMSOCK(csock));

		/* Mark the child inactive before it is stopped. */
		atomic_store(&csock->active, false);

		if (csock->tid == isc_nm_tid()) {
			/* The child lives on this thread: stop directly. */
			stop_udp_child(csock);
			continue;
		}

		/* Otherwise hand the stop over to the child's worker. */
		ievent = isc__nm_get_netievent_udpstop(sock->mgr, csock);
		isc__nm_enqueue_ievent(&sock->mgr->workers[i],
				       (isc__netievent_t *)ievent);
	}

	/*
	 * Wait until every child has stopped; each stop_udp_child()
	 * decrements rchildren and broadcasts on sock->cond.
	 */
	LOCK(&sock->lock);
	while (sock->rchildren > 0) {
		WAIT(&sock->cond, &sock->lock);
	}
	atomic_store(&sock->closed, true);
	UNLOCK(&sock->lock);

	isc__nmsocket_prep_destroy(sock);
}
1151:
/*
 * Start closing a UDP socket on its own worker thread.  The read
 * timer is closed first; timer_close_cb() then closes the UDP handle
 * itself once the timer handle is gone.
 */
static void
udp_close_direct(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());

	uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
}
1159:
1160: void
1161: isc__nm_async_udpclose(isc__networker_t *worker, isc__netievent_t *ev0) {
1162: isc__netievent_udpclose_t *ievent = (isc__netievent_udpclose_t *)ev0;
1163: isc_nmsocket_t *sock = ievent->sock;
1164:
1165: REQUIRE(VALID_NMSOCK(sock));
1166: REQUIRE(sock->tid == isc_nm_tid());
1167: UNUSED(worker);
1168:
1169: udp_close_direct(sock);
1170: }
1171:
1172: void
1173: isc__nm_udp_close(isc_nmsocket_t *sock) {
1174: REQUIRE(VALID_NMSOCK(sock));
1175: REQUIRE(sock->type == isc_nm_udpsocket);
1176: REQUIRE(!isc__nmsocket_active(sock));
1177:
1178: if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
1179: true)) {
1180: return;
1181: }
1182:
1183: if (sock->tid == isc_nm_tid()) {
1184: udp_close_direct(sock);
1185: } else {
1186: isc__netievent_udpclose_t *ievent =
1187: isc__nm_get_netievent_udpclose(sock->mgr, sock);
1188: isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
1189: (isc__netievent_t *)ievent);
1190: }
1191: }
1192:
/*
 * Shut down a UDP socket during netmgr teardown: deactivate it, and
 * either let an in-progress connect cancel itself, fail a pending
 * read, or schedule the socket for destruction, depending on its
 * current state.  Must run on the socket's worker thread.
 */
void
isc__nm_udp_shutdown(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(sock->type == isc_nm_udpsocket);

	/*
	 * If the socket is active, mark it inactive and
	 * continue. If it isn't active, stop now.
	 */
	if (!isc__nmsocket_deactivate(sock)) {
		return;
	}

	/*
	 * If the socket is connecting, the cancellation will happen in
	 * async_udpconnect() because the socket is now inactive.
	 */
	if (atomic_load(&sock->connecting)) {
		return;
	}

	/*
	 * When the client detaches the last handle, sock->statichandle
	 * becomes NULL; in that case nobody is interested in the
	 * callback, so only report the failed read while a handle is
	 * still attached.
	 */
	if (sock->statichandle) {
		failed_read_cb(sock, ISC_R_CANCELED);
		return;
	}

	/*
	 * Otherwise, we just send the socket to abyss...
	 */
	if (sock->parent == NULL) {
		isc__nmsocket_prep_destroy(sock);
	}
}
1232:
1233: void
1234: isc__nm_udp_cancelread(isc_nmhandle_t *handle) {
1235: isc_nmsocket_t *sock = NULL;
1236: isc__netievent_udpcancel_t *ievent = NULL;
1237:
1238: REQUIRE(VALID_NMHANDLE(handle));
1239:
1240: sock = handle->sock;
1241:
1242: REQUIRE(VALID_NMSOCK(sock));
1243: REQUIRE(sock->type == isc_nm_udpsocket);
1244:
1245: ievent = isc__nm_get_netievent_udpcancel(sock->mgr, sock, handle);
1246:
1247: isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
1248: (isc__netievent_t *)ievent);
1249: }
1250:
1251: void
1252: isc__nm_async_udpcancel(isc__networker_t *worker, isc__netievent_t *ev0) {
1253: isc__netievent_udpcancel_t *ievent = (isc__netievent_udpcancel_t *)ev0;
1254: isc_nmsocket_t *sock = NULL;
1255:
1256: UNUSED(worker);
1257:
1258: REQUIRE(VALID_NMSOCK(ievent->sock));
1259:
1260: sock = ievent->sock;
1261:
1262: REQUIRE(sock->tid == isc_nm_tid());
1263: REQUIRE(atomic_load(&sock->client));
1264:
1265: failed_read_cb(sock, ISC_R_EOF);
1266: }
1267:
1268: void
1269: isc__nm_udp_settimeout(isc_nmhandle_t *handle, uint32_t timeout) {
1270: REQUIRE(VALID_NMHANDLE(handle));
1271: REQUIRE(VALID_NMSOCK(handle->sock));
1272:
1273: isc_nmsocket_t *sock = handle->sock;
1274:
1275: sock->read_timeout = timeout;
1276: if (uv_is_active((uv_handle_t *)&sock->timer)) {
1277: start_sock_timer(sock);
1278: }
1279: }
CVSweb <webmaster@jp.NetBSD.org>