/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL_CREATE1

#include "src/core/lib/iomgr/ev_epollex_linux.h"

#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/syscall.h>

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

// debug aid: create workers on the heap (allows asan to spot
// use-after-destruction)
//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1

#define MAX_EPOLL_EVENTS 100
// TODO(juanlishen): We use a greater-than-one value here as a workaround fix to
// a keepalive ping timeout issue. We may want to revert
// https://github.com/grpc/grpc/pull/14943 once we figure out the root cause.
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 16
#define MAX_FDS_IN_CACHE 32

grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false,
                                                           "pollable_refcount");

/*******************************************************************************
 * pollable Declarations
 */

typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;

typedef struct pollable pollable;

/// A pollable is something that can be polled: it has an epoll set to poll on,
/// and a wakeup fd for kicks
/// There are three broad types:
/// - PO_EMPTY - the empty pollable, used before file descriptors are added to
///   the pollset
/// - PO_FD - a pollable containing only one FD - used to optimize single-fd
///   pollsets (which are common with synchronous api usage)
/// - PO_MULTI - a pollable containing many fds

  pollable_type type;  // immutable
  grpc_core::RefCount refs;

  grpc_wakeup_fd wakeup;

  // The following are relevant only for type PO_FD
  grpc_fd* owner_fd;       // Set to the owner_fd if the type is PO_FD
  gpr_mu owner_orphan_mu;  // Synchronizes access to owner_orphaned field
  bool owner_orphaned;     // Is the owner fd orphaned

  grpc_pollset_set* pollset_set;

  grpc_pollset_worker* root_worker;

  struct epoll_event events[MAX_EPOLL_EVENTS];

static const char* pollable_type_string(pollable_type t) {

static char* pollable_desc(pollable* p) {
  gpr_asprintf(&out, "type=%s epfd=%d wakeup=%d", pollable_type_string(p->type),
               p->epfd, p->wakeup.read_fd);

/// Shared empty pollable - used by pollset to poll on until the first fd is
/// added
static pollable* g_empty_pollable;

static grpc_error* pollable_create(pollable_type type, pollable** p);
static pollable* pollable_ref(pollable* p,
                              const grpc_core::DebugLocation& dbg_loc,
                              const char* reason) {
  p->refs.Ref(dbg_loc, reason);

static void pollable_unref(pollable* p, const grpc_core::DebugLocation& dbg_loc,
                           const char* reason) {
  if (p == nullptr) return;
  if (GPR_UNLIKELY(p != nullptr && p->refs.Unref(dbg_loc, reason))) {
    GRPC_FD_TRACE("pollable_unref: Closing epfd: %d", p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
    gpr_mu_destroy(&p->owner_orphan_mu);
    gpr_mu_destroy(&p->mu);

#define POLLABLE_REF(p, r) pollable_ref((p), DEBUG_LOCATION, (r))
#define POLLABLE_UNREF(p, r) pollable_unref((p), DEBUG_LOCATION, (r))
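
// Usage sketch: callers pair each ref with an unref under the same reason
// string, e.g.
//   pollable* q = POLLABLE_REF(p, "pollset_worker");
//   ...
//   POLLABLE_UNREF(q, "pollset_worker");
// The reason and DEBUG_LOCATION feed the grpc_trace_pollable_refcount debug
// trace so leaked references can be tracked down.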
/*******************************************************************************
 * Fd Declarations
 */

  grpc_fd(int fd, const char* name, bool track_err)
      : fd(fd), track_err(track_err) {
    gpr_mu_init(&orphan_mu);
    gpr_mu_init(&pollable_mu);
    read_closure.InitEvent();
    write_closure.InitEvent();
    error_closure.InitEvent();

    gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
    grpc_iomgr_register_object(&iomgr_object, fd_name);
    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
      gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name);

  // This is really the dtor, but the poller threads waking up from
  // epoll_wait() may access the (read|write|error)_closure after destruction.
  // Since the object will be added to the free pool, this behavior is
  // not going to cause issues, except spurious events if the FD is reused
  // while the race happens.
    grpc_iomgr_unregister_object(&iomgr_object);

    POLLABLE_UNREF(pollable_obj, "fd_pollable");

    gpr_mu_destroy(&pollable_mu);
    gpr_mu_destroy(&orphan_mu);

    read_closure.DestroyEvent();
    write_closure.DestroyEvent();
    error_closure.DestroyEvent();

  /* Since an fd is never really destroyed (i.e. gpr_free() is not called), it
   * is easy to run into hard-to-debug cases where fd fields are accessed even
   * after calling fd_destroy(). The following invalidates fd fields to make
   * catching such cases easier. */
    gpr_atm_no_barrier_store(&refst, -1);
    memset(&orphan_mu, -1, sizeof(orphan_mu));
    memset(&pollable_mu, -1, sizeof(pollable_mu));
    pollable_obj = nullptr;
    on_done_closure = nullptr;
    memset(&iomgr_object, -1, sizeof(iomgr_object));

  // bit 0 : 1=Active / 0=Orphaned
  // bits 1-n : refcount
  // Ref/Unref by two to avoid altering the orphaned bit
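  // For example (illustrative): an active fd holding one reference stores
  // refst == 3 (0b11); REF_BY(fd, 2, ...) takes it to 5 and UNREF_BY(fd, 2, ...)
  // back to 3. fd_orphan()'s REF_BY(fd, 1, ...) clears the active bit while
  // gaining a reference (3 -> 4), and the UNREF_BY that eventually drops the
  // count to zero hands the struct to fd_destroy() and the freelist.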
  // Protects pollable_obj and pollset_fds.
  grpc_core::InlinedVector<int, 1> pollset_fds;  // Used in PO_MULTI.
  pollable* pollable_obj = nullptr;              // Used in PO_FD.

  grpc_core::LockfreeEvent read_closure;
  grpc_core::LockfreeEvent write_closure;
  grpc_core::LockfreeEvent error_closure;

  struct grpc_fd* freelist_next = nullptr;
  grpc_closure* on_done_closure = nullptr;

  grpc_iomgr_object iomgr_object;

  // Do we need to track EPOLLERR events separately?

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;

typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;

struct grpc_pollset_worker {
  // debug aid: which thread started this worker
  grpc_pollset* pollset;
  pollable* pollable_obj;

  pwlink links[PWLINK_COUNT];

struct grpc_pollset {
  gpr_atm worker_count;
  gpr_atm active_pollable_type;
  pollable* active_pollable;
  bool kicked_without_poller;
  grpc_closure* shutdown_closure;
  bool already_shutdown;
  grpc_pollset_worker* root_worker;
  int containing_pollset_set_count;

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  grpc_core::RefCount refs;
  grpc_pollset_set* parent;

  size_t pollset_count;
  size_t pollset_capacity;
  grpc_pollset** pollsets;

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  *composite = grpc_error_add_child(*composite, error);

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
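
// Lifecycle sketch of the freelist below: fd_create() pops a struct off
// fd_freelist (or gpr_malloc()s a fresh one) and placement-news a grpc_fd into
// it; when the last ref is dropped, fd_destroy() pushes the struct back onto
// fd_freelist instead of freeing it, and the memory is only reclaimed by
// fd_global_shutdown() at engine teardown.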
static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
  if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
            "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd* fd, int n) {
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);

/* Uninitialize and add to the freelist */
static void fd_destroy(void* arg, grpc_error* error) {
  grpc_fd* fd = static_cast<grpc_fd*>(arg);

  /* Add the fd to the freelist */
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  gpr_mu_unlock(&fd_freelist_mu);

static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
  if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
            "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
static void unref_by(grpc_fd* fd, int n) {
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
        GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  // TODO(guantaol): We don't have a reasonable explanation about this
  // lock()/unlock() pattern. It can be a valid barrier if there is at most one
  // pending lock() at this point. Otherwise, there is still a possibility of
  // use-after-free race. Need to reason about the code and/or clean it up.
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  gpr_mu_destroy(&fd_freelist_mu);

static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));

  return new (new_fd) grpc_fd(fd, name, track_err);

static int fd_wrapped_fd(grpc_fd* fd) {
  return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
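
// Note on fd_wrapped_fd() above: the low bit of refst doubles as the
// "still active" flag (see the refst layout in grpc_fd), so the real
// descriptor is returned only until the fd is orphaned, and -1 afterwards.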
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      const char* reason) {
  bool is_fd_closed = false;

  gpr_mu_lock(&fd->orphan_mu);

  // Get the fd->pollable_obj and set the owner_orphaned on that pollable to
  // true so that the pollable will no longer access its owner_fd field.
  gpr_mu_lock(&fd->pollable_mu);
  pollable* pollable_obj = fd->pollable_obj;
    gpr_mu_lock(&pollable_obj->owner_orphan_mu);
    pollable_obj->owner_orphaned = true;

  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != nullptr) {
    // Remove the FD from all epoll sets, before releasing it.
    // Otherwise, we will receive epoll events after we release the FD.
    memset(&ev_fd, 0, sizeof(ev_fd));
    if (pollable_obj != nullptr) {  // For PO_FD.
      epoll_ctl(pollable_obj->epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
    for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {  // For PO_MULTI.
      const int epfd = fd->pollset_fds[i];
      epoll_ctl(epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
    *release_fd = fd->fd;

  // TODO(sreek): handle fd removal (where is_fd_closed=false)
    GRPC_FD_TRACE("epoll_fd %p (%d) was orphaned but not closed.", fd, fd->fd);

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE);

    gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
  gpr_mu_unlock(&fd->pollable_mu);
  gpr_mu_unlock(&fd->orphan_mu);

  UNREF_BY(fd, 2, reason); /* Drop the reference */

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure.IsShutdown();

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  if (fd->read_closure.SetShutdown(GRPC_ERROR_REF(why))) {
    if (shutdown(fd->fd, SHUT_RDWR)) {
      if (errno != ENOTCONN) {
        gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                grpc_fd_wrapped_fd(fd), errno);
    fd->write_closure.SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure.SetShutdown(GRPC_ERROR_REF(why));
  GRPC_ERROR_UNREF(why);

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure.NotifyOn(closure);

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure.NotifyOn(closure);

static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure.NotifyOn(closure);

static bool fd_has_pollset(grpc_fd* fd, grpc_pollset* pollset) {
  const int epfd = pollset->active_pollable->epfd;
  grpc_core::MutexLock lock(&fd->pollable_mu);
  for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {
    if (fd->pollset_fds[i] == epfd) {

static void fd_add_pollset(grpc_fd* fd, grpc_pollset* pollset) {
  const int epfd = pollset->active_pollable->epfd;
  grpc_core::MutexLock lock(&fd->pollable_mu);
  fd->pollset_fds.push_back(epfd);

/*******************************************************************************
 * Pollable Definitions
 */

static grpc_error* pollable_create(pollable_type type, pollable** p) {
  int epfd = epoll_create1(EPOLL_CLOEXEC);
    return GRPC_OS_ERROR(errno, "epoll_create1");
  GRPC_FD_TRACE("Pollable_create: created epfd: %d (type: %d)", epfd, type);
  *p = static_cast<pollable*>(gpr_malloc(sizeof(**p)));
  grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup);
  if (err != GRPC_ERROR_NONE) {
        "Pollable_create: closed epfd: %d (type: %d). wakeupfd_init error",
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  ev.data.ptr = (void*)(1 | (intptr_t)&(*p)->wakeup);
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
    err = GRPC_OS_ERROR(errno, "epoll_ctl");
        "Pollable_create: closed epfd: %d (type: %d). epoll_ctl error", epfd,
    grpc_wakeup_fd_destroy(&(*p)->wakeup);

  new (&(*p)->refs) grpc_core::RefCount(1, &grpc_trace_pollable_refcount);
  gpr_mu_init(&(*p)->mu);
  (*p)->owner_fd = nullptr;
  gpr_mu_init(&(*p)->owner_orphan_mu);
  (*p)->owner_orphaned = false;
  (*p)->pollset_set = nullptr;
  (*p)->next = (*p)->prev = *p;
  (*p)->root_worker = nullptr;
  (*p)->event_cursor = 0;
  (*p)->event_count = 0;
  return GRPC_ERROR_NONE;

static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollable_add_fd";
  const int epfd = p->epfd;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);

  struct epoll_event ev_fd;
      static_cast<uint32_t>(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
  /* Use the second least significant bit of ev_fd.data.ptr to store track_err
   * to avoid synchronization issues when accessing it after receiving an event.
   * Accessing fd would be a data race there because the fd might have been
   * returned to the free list at that point. */
  ev_fd.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(fd) |
                                           (fd->track_err ? 2 : 0));
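  // Tagging scheme used in ev.data.ptr (roughly): bit 0 set marks the
  // pollable's own wakeup fd (see pollable_create above), while bit 1 carries
  // track_err for a grpc_fd. pollable_process_events() later undoes this by
  // masking, e.g.
  //   grpc_fd* fd = reinterpret_cast<grpc_fd*>((intptr_t)data_ptr & ~2);
  //   bool track_err = ((intptr_t)data_ptr & 2) != 0;
  // which is also why grpc_fd allocations must be at least 4-byte aligned.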
  GRPC_STATS_INC_SYSCALL_EPOLL_CTL();
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
      append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* Global state management */
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  return pollable_create(PO_EMPTY, &g_empty_pollable);

static void pollset_global_shutdown(void) {
  POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable");
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);

/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
            "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
            "rw=%p (target:NULL) cpsc=%d (target:0)",
            pollset, pollset->active_pollable, pollset->shutdown_closure,
            pollset->root_worker, pollset->containing_pollset_set_count);
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->containing_pollset_set_count == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
    pollset->already_shutdown = true;

/* pollset->mu must be held before calling this function,
 * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
 * held by the caller */
static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("kick_one_worker", 0);
  pollable* p = specific_worker->pollable_obj;
  grpc_core::MutexLock lock(&p->mu);
  GPR_ASSERT(specific_worker != nullptr);
  if (specific_worker->kicked) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
    return GRPC_ERROR_NONE;
  if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  if (specific_worker == p->root_worker) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
    specific_worker->kicked = true;
    grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
  if (specific_worker->initialized_cv) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  // we can get here during end_worker after removing specific_worker from the
  // pollable list but before removing it from the pollset list
  return GRPC_ERROR_NONE;

static grpc_error* pollset_kick(grpc_pollset* pollset,
                                grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("pollset_kick", 0);
  GRPC_STATS_INC_POLLSET_KICK();
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
        pollset, specific_worker,
        (void*)gpr_tls_get(&g_current_thread_pollset),
        (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
  if (specific_worker == nullptr) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == nullptr) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
        // We've been asked to kick a poller, but we haven't been told which one
        // We look at the pollset worker list because:
        // 1. the pollable list may include workers from other pollers, so we'd
        //    need to do an O(N) search
        // 2. we'd additionally need to take the pollable lock, which we've so
        //    far avoided
        // Now, we would prefer to wake a poller in cv_wait, and not in
        // epoll_wait (since the latter would imply the need to do an additional
        // wakeup)
        // We know that if a worker is at the root of a pollable, it's (likely)
        // also the root of a pollset, and we know that if a worker is NOT at
        // the root of a pollset, it's (likely) not at the root of a pollable,
        // so we take our chances and choose the SECOND worker enqueued against
        // the pollset as a worker that's likely to be in cv_wait
        return kick_one_worker(
            pollset->root_worker->links[PWLINK_POLLSET].next);
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
    return GRPC_ERROR_NONE;
    return kick_one_worker(specific_worker);

static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  const char* err_desc = "pollset_kick_all";
  grpc_pollset_worker* w = pollset->root_worker;
      GRPC_STATS_INC_POLLSET_KICK();
      append_error(&error, kick_one_worker(w), err_desc);
      w = w->links[PWLINK_POLLSET].next;
    } while (w != pollset->root_worker);

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  gpr_atm_no_barrier_store(&pollset->worker_count, 0);
  gpr_atm_no_barrier_store(&pollset->active_pollable_type, PO_EMPTY);
  pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
  pollset->kicked_without_poller = false;
  pollset->shutdown_closure = nullptr;
  pollset->already_shutdown = false;
  pollset->root_worker = nullptr;
  pollset->containing_pollset_set_count = 0;

static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  return static_cast<int>(delta);

static void fd_become_readable(grpc_fd* fd) { fd->read_closure.SetReady(); }

static void fd_become_writable(grpc_fd* fd) { fd->write_closure.SetReady(); }

static void fd_has_errors(grpc_fd* fd) { fd->error_closure.SetReady(); }

/* Get the pollable_obj attached to this fd. If none is attached, create a new
 * pollable object (of type PO_FD), attach it to the fd and return it.
 *
 * Note that if a pollable object is already attached to the fd, it may be of
 * either PO_FD or PO_MULTI type */
static grpc_error* get_fd_pollable(grpc_fd* fd, pollable** p) {
  gpr_mu_lock(&fd->pollable_mu);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "get_fd_pollable";
  if (fd->pollable_obj == nullptr) {
    if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
      fd->pollable_obj->owner_fd = fd;
      if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd),
        POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
        fd->pollable_obj = nullptr;
  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(fd->pollable_obj != nullptr);
    *p = POLLABLE_REF(fd->pollable_obj, "pollset");
    GPR_ASSERT(fd->pollable_obj == nullptr);
  gpr_mu_unlock(&fd->pollable_mu);

/* pollset->mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_SCOPE("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);

static grpc_error* pollable_process_events(grpc_pollset* pollset,
                                           pollable* pollable_obj, bool drain) {
  GPR_TIMER_SCOPE("pollable_process_events", 0);
  static const char* err_desc = "pollset_process_events";
  // Use a simple heuristic to determine how many fd events to process
  // per loop iteration. (events/workers)
  int handle_count = 1;
  int worker_count = gpr_atm_no_barrier_load(&pollset->worker_count);
  GPR_ASSERT(worker_count > 0);
      (pollable_obj->event_count - pollable_obj->event_cursor) / worker_count;
  if (handle_count == 0) {
  } else if (handle_count > MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) {
    handle_count = MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL;
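  // For instance (illustrative numbers): with 48 buffered events and 3 active
  // workers each call handles 48 / 3 = 16 events; with 200 events and a single
  // worker the count is clamped to MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL
  // (16), and the rest are picked up by later calls via event_cursor.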
  grpc_error* error = GRPC_ERROR_NONE;
  for (int i = 0; (drain || i < handle_count) &&
                  pollable_obj->event_cursor != pollable_obj->event_count;
    int n = pollable_obj->event_cursor++;
    struct epoll_event* ev = &pollable_obj->events[n];
    void* data_ptr = ev->data.ptr;
    if (1 & (intptr_t)data_ptr) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
          grpc_wakeup_fd_consume_wakeup(
              (grpc_wakeup_fd*)((~static_cast<intptr_t>(1)) &
                                (intptr_t)data_ptr)),
          reinterpret_cast<grpc_fd*>(reinterpret_cast<intptr_t>(data_ptr) & ~2);
      bool track_err = reinterpret_cast<intptr_t>(data_ptr) & 2;
      bool cancel = (ev->events & EPOLLHUP) != 0;
      bool error = (ev->events & EPOLLERR) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      bool err_fallback = error && !track_err;
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
                "PS:%p got fd %p: cancel=%d read=%d "
                pollset, fd, cancel, read_ev, write_ev);
      if (error && !err_fallback) {
      if (read_ev || cancel || err_fallback) {
        fd_become_readable(fd);
      if (write_ev || cancel || err_fallback) {
        fd_become_writable(fd);

/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_pollset* pollset) {
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  gpr_mu_destroy(&pollset->mu);

static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollable_epoll", 0);
  int timeout = poll_deadline_to_millis_timeout(deadline);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    char* desc = pollable_desc(p);
    gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);

    GRPC_SCHEDULING_START_BLOCKING_REGION;
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
    GRPC_SCHEDULING_END_BLOCKING_REGION;

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);

  return GRPC_ERROR_NONE;

/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker** root_worker,
                          grpc_pollset_worker* worker, pwlinks link) {
  if (*root_worker == nullptr) {
    *root_worker = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    worker->links[link].next = *root_worker;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
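
// Sketch of the list shape worker_insert() maintains: each pwlink chain is a
// circular doubly linked list. Inserting W3 while *root_worker == W1 and the
// ring is W1 <-> W2 <-> W1 splices W3 in just before the root, giving
// W1 <-> W2 <-> W3 <-> W1; new workers queue up behind the root, which stays
// the active poller.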
/* returns the new root IFF the root changed */
typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
                                          grpc_pollset_worker* worker,
  if (worker == *root_worker) {
    if (worker == worker->links[link].next) {
      *root_worker = nullptr;
      *root_worker = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
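
// How the three results are consumed (see end_worker below): WRR_NEW_ROOT
// means another worker just became the pollable's root and is signalled via
// its condition variable so it can take over polling; WRR_EMPTIED on the
// pollset list is what lets pollset_maybe_finish_shutdown() run; WRR_REMOVED
// needs no follow-up.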
/* Return true if this thread should poll */
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
      (pollset->shutdown_closure == nullptr && !pollset->already_shutdown);
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, 1);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable_obj =
      POLLABLE_REF(pollset->active_pollable, "pollset_worker");
  worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET);
  gpr_mu_lock(&worker->pollable_obj->mu);
  if (!worker_insert(&worker->pollable_obj->root_worker, worker,
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    gpr_mu_unlock(&pollset->mu);
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
        worker->pollable_obj->root_worker != worker) {
      gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable_obj, worker,
              poll_deadline_to_millis_timeout(deadline));
    while (do_poll && worker->pollable_obj->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable_obj, worker);
      } else if (worker->kicked) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
                  worker->pollable_obj, worker);
      } else if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
                 worker->pollable_obj->root_worker != worker) {
        gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable_obj, worker);
    grpc_core::ExecCtx::Get()->InvalidateNow();
    gpr_mu_unlock(&pollset->mu);
  gpr_mu_unlock(&worker->pollable_obj->mu);

static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_SCOPE("end_worker", 0);
  gpr_mu_lock(&pollset->mu);
  gpr_mu_lock(&worker->pollable_obj->mu);
  switch (worker_remove(&worker->pollable_obj->root_worker, worker,
    case WRR_NEW_ROOT: {
      // wakeup new poller
      grpc_pollset_worker* new_root = worker->pollable_obj->root_worker;
      GPR_ASSERT(new_root->initialized_cv);
      gpr_cv_signal(&new_root->cv);
      if (pollset->active_pollable != worker->pollable_obj) {
        // pollable no longer being polled: flush events
        pollable_process_events(pollset, worker->pollable_obj, true);
  gpr_mu_unlock(&worker->pollable_obj->mu);
  POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
  if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
    pollset_maybe_finish_shutdown(pollset);
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, -1);

static long sys_gettid(void) { return syscall(__NR_gettid); }

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* pollset,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  grpc_pollset_worker* worker =
      (grpc_pollset_worker*)gpr_malloc(sizeof(*worker));
#define WORKER_PTR (worker)
  grpc_pollset_worker worker;
#define WORKER_PTR (&worker)
  WORKER_PTR->originator = sys_gettid();
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
            "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
            " kwp=%d pollable=%p",
            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
            deadline, pollset->kicked_without_poller, pollset->active_pollable);
  static const char* err_desc = "pollset_work";
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) {
      gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
      gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
      if (WORKER_PTR->pollable_obj->event_cursor ==
          WORKER_PTR->pollable_obj->event_count) {
        append_error(&error, pollable_epoll(WORKER_PTR->pollable_obj, deadline),
          pollable_process_events(pollset, WORKER_PTR->pollable_obj, false),
      grpc_core::ExecCtx::Get()->Flush();
      gpr_tls_set(&g_current_thread_pollset, 0);
      gpr_tls_set(&g_current_thread_worker, 0);
    end_worker(pollset, WORKER_PTR, worker_hdl);
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP

static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
    grpc_pollset* pollset, grpc_fd* fd) {
  static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
  grpc_error* error = GRPC_ERROR_NONE;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
            "PS:%p add fd %p (%d); transition pollable from empty to fd",
            pollset, fd, fd->fd);
  append_error(&error, pollset_kick_all(pollset), err_desc);
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  append_error(&error, get_fd_pollable(fd, &pollset->active_pollable),

static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
    grpc_pollset* pollset, grpc_fd* and_add_fd) {
  static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
  grpc_error* error = GRPC_ERROR_NONE;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
        pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
        pollset->active_pollable->owner_fd);
  append_error(&error, pollset_kick_all(pollset), err_desc);
  grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable),
    append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd),
    if (and_add_fd != nullptr) {
      append_error(&error,
                   pollable_add_fd(pollset->active_pollable, and_add_fd),

/* expects pollsets locked, flag whether fd is locked or not */
static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
  switch (pollset->active_pollable->type) {
      /* empty pollable --> single fd pollable */
      error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
            pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
        /* fd --> multipoller */
            pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd);
      gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      error = pollable_add_fd(pollset->active_pollable, fd);
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
    gpr_atm_rel_store(&pollset->active_pollable_type,
                      pollset->active_pollable->type);
    POLLABLE_UNREF(po_at_start, "pollset_add_fd");

static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
                                                   pollable** pollable_obj) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
  switch (pollset->active_pollable->type) {
      POLLABLE_UNREF(pollset->active_pollable, "pollset");
      error = pollable_create(PO_MULTI, &pollset->active_pollable);
      /* Any workers currently polling on this pollset must now be woken up so
       * that they can pick up the new active_pollable */
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
                "PS:%p active pollable transition from empty to multi",
      static const char* err_desc =
          "pollset_as_multipollable_locked: empty -> multi";
      append_error(&error, pollset_kick_all(pollset), err_desc);
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        // Unlock before Unref'ing the pollable
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
        POLLABLE_UNREF(pollset->active_pollable, "pollset");
        error = pollable_create(PO_MULTI, &pollset->active_pollable);
        error = pollset_transition_pollable_from_fd_to_multi_locked(pollset,
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
    *pollable_obj = nullptr;
    gpr_atm_rel_store(&pollset->active_pollable_type,
                      pollset->active_pollable->type);
    *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
    POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");

static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_add_fd", 0);

  // We never transition from PO_MULTI to other modes (i.e., PO_FD or PO_EMPTY)
  // and, thus, it is safe to simply store and check whether the FD has already
  // been added to the active pollable previously.
  if (gpr_atm_acq_load(&pollset->active_pollable_type) == PO_MULTI &&
      fd_has_pollset(fd, pollset)) {

  grpc_core::MutexLock lock(&pollset->mu);
  grpc_error* error = pollset_add_fd_locked(pollset, fd);

  // If we are in PO_MULTI mode, we should update the pollsets of the FD.
  if (gpr_atm_no_barrier_load(&pollset->active_pollable_type) == PO_MULTI) {
    fd_add_pollset(fd, pollset);

  GRPC_LOG_IF_ERROR("pollset_add_fd", error);

/*******************************************************************************
 * Pollset-set Definitions
 */

static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) {
  gpr_mu_lock(&pss->mu);
  while (pss->parent != nullptr) {
    gpr_mu_unlock(&pss->mu);
    gpr_mu_lock(&pss->mu);
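  // "adam" here is the root ancestor: when two pollset_sets are merged (see
  // pollset_set_add_pollset_set below) the smaller one just points its parent
  // at the larger one, so this loop chases parent pointers, dropping and
  // re-taking the lock at each hop, until it holds the lock on the root set
  // that actually owns the fds/pollsets arrays.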
static grpc_pollset_set* pollset_set_create(void) {
  grpc_pollset_set* pss =
      static_cast<grpc_pollset_set*>(gpr_zalloc(sizeof(*pss)));
  gpr_mu_init(&pss->mu);
  new (&pss->refs) grpc_core::RefCount();

static void pollset_set_unref(grpc_pollset_set* pss) {
  if (pss == nullptr) return;
  if (GPR_LIKELY(!pss->refs.Unref())) return;
  pollset_set_unref(pss->parent);
  gpr_mu_destroy(&pss->mu);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    gpr_mu_lock(&pss->pollsets[i]->mu);
    if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
      pollset_maybe_finish_shutdown(pss->pollsets[i]);
    gpr_mu_unlock(&pss->pollsets[i]->mu);
  for (size_t i = 0; i < pss->fd_count; i++) {
    UNREF_BY(pss->fds[i], 2, "pollset_set");
  gpr_free(pss->pollsets);

static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
  pss = pss_lock_adam(pss);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    append_error(&error, pollable_add_fd(pss->pollsets[i]->active_pollable, fd),
  if (pss->fd_count == pss->fd_capacity) {
    pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
    pss->fds = static_cast<grpc_fd**>(
        gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds)));
  REF_BY(fd, 2, "pollset_set");
  pss->fds[pss->fd_count++] = fd;
  gpr_mu_unlock(&pss->mu);

  GRPC_LOG_IF_ERROR(err_desc, error);

static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
  pss = pss_lock_adam(pss);
  for (i = 0; i < pss->fd_count; i++) {
    if (pss->fds[i] == fd) {
      UNREF_BY(fd, 2, "pollset_set");
  GPR_ASSERT(i != pss->fd_count);
  for (; i < pss->fd_count - 1; i++) {
    pss->fds[i] = pss->fds[i + 1];
  gpr_mu_unlock(&pss->mu);

static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
  pss = pss_lock_adam(pss);
  for (i = 0; i < pss->pollset_count; i++) {
    if (pss->pollsets[i] == ps) {
  GPR_ASSERT(i != pss->pollset_count);
  for (; i < pss->pollset_count - 1; i++) {
    pss->pollsets[i] = pss->pollsets[i + 1];
  pss->pollset_count--;
  gpr_mu_unlock(&pss->mu);
  gpr_mu_lock(&ps->mu);
  if (0 == --ps->containing_pollset_set_count) {
    pollset_maybe_finish_shutdown(ps);
  gpr_mu_unlock(&ps->mu);

// add all fds to pollables, and output a new array of unorphaned out_fds
// assumes pollsets are multipollable
static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
                                       grpc_pollset** pollsets,
                                       size_t pollset_count,
                                       const char* err_desc, grpc_fd** out_fds,
                                       size_t* out_fd_count) {
  GPR_TIMER_SCOPE("add_fds_to_pollsets", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  for (size_t i = 0; i < fd_count; i++) {
    gpr_mu_lock(&fds[i]->orphan_mu);
    if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
      gpr_mu_unlock(&fds[i]->orphan_mu);
      UNREF_BY(fds[i], 2, "pollset_set");
      for (size_t j = 0; j < pollset_count; j++) {
        append_error(&error,
                     pollable_add_fd(pollsets[j]->active_pollable, fds[i]),
      gpr_mu_unlock(&fds[i]->orphan_mu);
      out_fds[(*out_fd_count)++] = fds[i];

static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_pollset";
  pollable* pollable_obj = nullptr;
  gpr_mu_lock(&ps->mu);
  if (!GRPC_LOG_IF_ERROR(err_desc,
                         pollset_as_multipollable_locked(ps, &pollable_obj))) {
    GPR_ASSERT(pollable_obj == nullptr);
    gpr_mu_unlock(&ps->mu);
  ps->containing_pollset_set_count++;
  gpr_mu_unlock(&ps->mu);
  pss = pss_lock_adam(pss);
  size_t initial_fd_count = pss->fd_count;
  append_error(&error,
               add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1, err_desc,
                                   pss->fds, &pss->fd_count),
  if (pss->pollset_count == pss->pollset_capacity) {
    pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
    pss->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
        pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets)));
  pss->pollsets[pss->pollset_count++] = ps;
  gpr_mu_unlock(&pss->mu);
  POLLABLE_UNREF(pollable_obj, "pollset_set");

  GRPC_LOG_IF_ERROR(err_desc, error);

static void pollset_set_add_pollset_set(grpc_pollset_set* a,
                                        grpc_pollset_set* b) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
      // pollset ancestors are the same: nothing to do
      GPR_SWAP(grpc_pollset_set*, a, b);
    gpr_mu* a_mu = &a->mu;
    gpr_mu* b_mu = &b->mu;
    if (a->parent != nullptr) {
    } else if (b->parent != nullptr) {
      break;  // exit loop, both pollsets locked
    gpr_mu_unlock(a_mu);
    gpr_mu_unlock(b_mu);
  // try to do the least copying possible
  // TODO(sreek): there's probably a better heuristic here
  const size_t a_size = a->fd_count + a->pollset_count;
  const size_t b_size = b->fd_count + b->pollset_count;
  if (b_size > a_size) {
    GPR_SWAP(grpc_pollset_set*, a, b);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
  if (a->fd_capacity < a->fd_count + b->fd_count) {
    a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
    a->fds = static_cast<grpc_fd**>(
        gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds)));
  size_t initial_a_fd_count = a->fd_count;
      add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets,
                          b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
      add_fds_to_pollsets(b->fds, b->fd_count, a->pollsets, a->pollset_count,
                          "merge_b2a", a->fds, &a->fd_count),
  if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
    a->pollset_capacity =
        GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
    a->pollsets = static_cast<grpc_pollset**>(
        gpr_realloc(a->pollsets, a->pollset_capacity * sizeof(*a->pollsets)));
  if (b->pollset_count > 0) {
    memcpy(a->pollsets + a->pollset_count, b->pollsets,
           b->pollset_count * sizeof(*b->pollsets));
  a->pollset_count += b->pollset_count;
  gpr_free(b->pollsets);
  b->pollsets = nullptr;
  b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0;
  gpr_mu_unlock(&a->mu);
  gpr_mu_unlock(&b->mu);
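  // Merge summary (roughly): the set with fewer entries is folded into the
  // larger one, the larger set becomes the smaller set's parent, and the
  // smaller set is left empty so future operations on it are forwarded up the
  // parent chain by pss_lock_adam(). Both sets' fds are also registered with
  // the other set's pollsets ("merge_a2b" / "merge_b2a" above) so polling
  // coverage is preserved.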
static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                        grpc_pollset_set* item) {}

/*******************************************************************************
 * Event engine binding
 */

static bool is_any_background_poller_thread(void) { return false; }

static void shutdown_background_closure(void) {}

static bool add_closure_to_background_poller(grpc_closure* closure,
                                             grpc_error* error) {

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();

static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset),

    pollset_set_unref,  // destroy ==> unref 1 public ref
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,

    is_any_background_poller_thread,
    shutdown_background_closure,
    add_closure_to_background_poller,

const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool explicitly_requested) {
  if (!grpc_has_wakeup_fd()) {
    gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd.");
  if (!grpc_is_epollexclusive_available()) {
    gpr_log(GPR_INFO, "Skipping epollex because it is not supported.");
  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    pollset_global_shutdown();
    fd_global_shutdown();

#else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
#if defined(GRPC_POSIX_SOCKET_EV_EPOLLEX)
#include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means
   epoll_create1 is not available. Return NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool explicitly_requested) {
#endif /* defined(GRPC_POSIX_SOCKET_EV_EPOLLEX) */

#endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */