/*
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#ifdef GRPC_POSIX_SOCKET_TCP

#include "src/core/lib/iomgr/tcp_posix.h"

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
#endif
#define TCP_CM_INQ TCP_INQ

#ifdef GRPC_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
#define SENDMSG_FLAGS 0
#endif

#ifdef GRPC_MSG_IOVLEN_TYPE
typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
#else
typedef size_t msg_iovlen_type;
#endif
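/* msghdr::msg_iovlen is a size_t on Linux but a narrower integer type on some
 * other POSIX platforms, so the build may override the type through
 * GRPC_MSG_IOVLEN_TYPE; the iovec-count casts below go through
 * msg_iovlen_type to stay portable. */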
extern grpc_core::TraceFlag grpc_tcp_trace;

  /* Used by the endpoint read function to distinguish the very first read
   * call from the rest. */
  bool is_first_read;
  double target_length;
  double bytes_read_this_round;
  grpc_core::RefCount refcount;
  gpr_atm shutdown_count;

  int min_read_chunk_size;
  int max_read_chunk_size;

  /* garbage after the last read */
  grpc_slice_buffer last_read_buffer;

  grpc_slice_buffer* incoming_buffer;
  int inq;          /* bytes pending on the socket from the last read. */
  bool inq_capable; /* cache whether kernel supports inq */

  grpc_slice_buffer* outgoing_buffer;
  /* byte within outgoing_buffer->slices[0] to write next */
  size_t outgoing_byte_idx;

  grpc_closure* read_cb;
  grpc_closure* write_cb;
  grpc_closure* release_fd_cb;

  grpc_closure read_done_closure;
  grpc_closure write_done_closure;
  grpc_closure error_closure;

  grpc_resource_user* resource_user;
  grpc_resource_user_slice_allocator slice_allocator;

  grpc_core::TracedBuffer* tb_head; /* List of traced buffers */
  gpr_mu tb_mu; /* Lock for access to list of traced buffers */

  /* grpc_endpoint_write takes an argument which, if non-null, means that the
   * transport layer wants the TCP layer to collect timestamps for this write.
   * This arg is forwarded to the timestamps callback function when the ACK
   * timestamp is received from the kernel. This arg is a (void*), which allows
   * users of this API to pass in a pointer to any kind of structure. This
   * structure could actually be a tag or any book-keeping object that the user
   * can use to distinguish between different traced writes. The only
   * requirement from the TCP endpoint layer is that this arg should be
   * non-null if the user wants timestamps for the write. */
  void* outgoing_buffer_arg;
  /* A counter which starts at 0. It is initialized the first time the socket
   * options for collecting timestamps are set, and is incremented with each
   * byte sent. */
  int bytes_counter;
  bool socket_ts_enabled; /* True if timestamping options are set on the
                             socket. */
  bool ts_capable;        /* Cache whether we can set timestamping options */
  gpr_atm stop_error_notification; /* Set to 1 if we do not want to be
                                      notified on errors anymore. */
struct backup_poller {
  gpr_mu* pollset_mu;
  grpc_closure run_poller;
};

#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
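/* The pollset used by the backup poller is allocated in the same block as the
 * backup_poller struct itself (see the gpr_zalloc(sizeof(*p) +
 * grpc_pollset_size()) call in cover_self() below), so BACKUP_POLLER_POLLSET
 * simply points one struct past the header. */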
static gpr_atm g_uncovered_notifications_pending;
static gpr_atm g_backup_poller; /* backup_poller* */
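/* g_uncovered_notifications_pending tracks outstanding "uncovered" write
 * notifications: cover_self() bumps it and drop_uncovered() releases it, and
 * the final release (the 1 -> 0 CAS in run_poller()) is what lets the backup
 * poller shut itself down. */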
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
                                                 grpc_error* error);
static void done_poller(void* bp, grpc_error* error_ignored) {
  backup_poller* p = static_cast<backup_poller*>(bp);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
  }
  grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
  gpr_free(p);
}
static void run_poller(void* bp, grpc_error* error_ignored) {
  backup_poller* p = static_cast<backup_poller*>(bp);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
  }
  gpr_mu_lock(p->pollset_mu);
  grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
  GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
  GRPC_LOG_IF_ERROR(
      "backup_poller:pollset_work",
      grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
  gpr_mu_unlock(p->pollset_mu);
  /* The last "uncovered" notification is the ref that keeps us polling; if we
   * get there, try a CAS to release it. */
  if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
      gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
    gpr_mu_lock(p->pollset_mu);
    bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
    }
    gpr_mu_unlock(p->pollset_mu);
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
    }
    grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
                          GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
                                            grpc_schedule_on_exec_ctx));
  } else {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
    }
    GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
  }
}
static void drop_uncovered(grpc_tcp* tcp) {
  backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
  gpr_atm old_count =
      gpr_atm_full_fetch_add(&g_uncovered_notifications_pending, -1);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
            static_cast<int>(old_count), static_cast<int>(old_count) - 1);
  }
  GPR_ASSERT(old_count != 1);
}
// The gRPC API considers a Write operation to be done the moment it clears
// 'flow control', i.e., not necessarily sent on the wire. This means that the
// application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
// manner when its `Write()` API is acked.
//
// We need to ensure that the fd is 'covered' (i.e., being monitored by some
// polling thread so that progress is made), and hence we add it to a backup
// poller here.
static void cover_self(grpc_tcp* tcp) {
  backup_poller* p;
  gpr_atm old_count =
      gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
            static_cast<int>(old_count), 2 + static_cast<int>(old_count));
  }
  if (old_count == 0) {
    GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
    p = static_cast<backup_poller*>(
        gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
    }
    grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
    gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
                                         grpc_core::Executor::Scheduler(
                                             grpc_core::ExecutorJobType::LONG)),
                       GRPC_ERROR_NONE);
  } else {
    while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) ==
           nullptr) {
      // spin waiting for backup poller
    }
  }
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
  }
  grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
  if (old_count != 0) {
    drop_uncovered(tcp);
  }
}
static void notify_on_read(grpc_tcp* tcp) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
  }
  grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_tcp* tcp) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
  }
  if (!grpc_event_engine_run_in_background()) {
    cover_self(tcp);
  }
  grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
}
static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error));
  }
  drop_uncovered(static_cast<grpc_tcp*>(arg));
  tcp_handle_write(arg, error);
}
static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
  tcp->bytes_read_this_round += static_cast<double>(bytes);
}
static void finish_estimate(grpc_tcp* tcp) {
  /* If we read >80% of the target buffer in one read loop, increase the size
     of the target buffer to either the amount read, or twice its previous
     value. */
  if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
    tcp->target_length =
        GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round);
  } else {
    tcp->target_length =
        0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
  }
  tcp->bytes_read_this_round = 0;
}
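/* Example: with target_length = 8192, a round that reads 7000 bytes (more
 * than 80% of the target) grows target_length to GPR_MAX(2 * 8192, 7000) =
 * 16384, while a round that reads only 1000 bytes decays it slowly to
 * 0.99 * 8192 + 0.01 * 1000 ~= 8120. */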
static size_t get_target_read_size(grpc_tcp* tcp) {
  grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user);
  double pressure = grpc_resource_quota_get_memory_pressure(rq);
  double target =
      tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
  size_t sz = ((static_cast<size_t> GPR_CLAMP(target, tcp->min_read_chunk_size,
                                              tcp->max_read_chunk_size)) +
               255) &
              ~static_cast<size_t>(255);
  /* Don't use more than 1/16th of the overall resource quota for a single
     read alloc. */
  size_t rqmax = grpc_resource_quota_peek_size(rq);
  if (sz > rqmax / 16 && rqmax > 1024) {
    sz = rqmax / 16;
  }
  return sz;
}
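/* Example: under memory pressure of 0.9 the target is scaled by
 * (1.0 - 0.9) / 0.2 = 0.5; the result is then clamped to
 * [min_read_chunk_size, max_read_chunk_size], and the "+ 255 & ~255"
 * arithmetic rounds it up to a 256-byte boundary. */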
static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
  return grpc_error_set_str(
      grpc_error_set_int(
          grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
          /* All tcp errors are marked with UNAVAILABLE so that the application
           * may choose to retry. */
          GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
      GRPC_ERROR_STR_TARGET_ADDRESS,
      grpc_slice_from_copied_string(tcp->peer_string));
}
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_fd_shutdown(tcp->em_fd, why);
  grpc_resource_user_shutdown(tcp->resource_user);
}
static void tcp_free(grpc_tcp* tcp) {
  grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
                 "tcp_unref_orphan");
  grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
  grpc_resource_user_unref(tcp->resource_user);
  gpr_free(tcp->peer_string);
  /* The lock is not really necessary here, since all refs have been released */
  gpr_mu_lock(&tcp->tb_mu);
  grpc_core::TracedBuffer::Shutdown(
      &tcp->tb_head, tcp->outgoing_buffer_arg,
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
  gpr_mu_unlock(&tcp->tb_mu);
  tcp->outgoing_buffer_arg = nullptr;
  gpr_mu_destroy(&tcp->tb_mu);
  gpr_free(tcp);
}
#ifndef NDEBUG
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), DEBUG_LOCATION)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), DEBUG_LOCATION)
static void tcp_unref(grpc_tcp* tcp, const char* reason,
                      const grpc_core::DebugLocation& debug_location) {
  if (GPR_UNLIKELY(tcp->refcount.Unref(debug_location, reason))) {
    tcp_free(tcp);
  }
}

static void tcp_ref(grpc_tcp* tcp, const char* reason,
                    const grpc_core::DebugLocation& debug_location) {
  tcp->refcount.Ref(debug_location, reason);
}
#else
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(grpc_tcp* tcp) {
  if (GPR_UNLIKELY(tcp->refcount.Unref())) {
    tcp_free(tcp);
  }
}

static void tcp_ref(grpc_tcp* tcp) { tcp->refcount.Ref(); }
#endif
static void tcp_destroy(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
  if (grpc_event_engine_can_track_errors()) {
    gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
    grpc_fd_set_error(tcp->em_fd);
  }
  TCP_UNREF(tcp, "destroy");
}
static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
  grpc_closure* cb = tcp->read_cb;

  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
    size_t i;
    const char* str = grpc_error_string(error);
    gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp, tcp->peer_string, str);

    if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
      for (i = 0; i < tcp->incoming_buffer->count; i++) {
        char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
                                     GPR_DUMP_HEX | GPR_DUMP_ASCII);
        gpr_log(GPR_DEBUG, "DATA: %s", dump);
        gpr_free(dump);
      }
    }
  }

  tcp->read_cb = nullptr;
  tcp->incoming_buffer = nullptr;
  GRPC_CLOSURE_SCHED(cb, error);
}
#define MAX_READ_IOVEC 4
static void tcp_do_read(grpc_tcp* tcp) {
  GPR_TIMER_SCOPE("tcp_do_read", 0);
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t total_read_bytes = 0;
  size_t iov_len =
      std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
#ifdef GRPC_LINUX_ERRQUEUE
  constexpr size_t cmsg_alloc_space =
      CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) + CMSG_SPACE(sizeof(int));
#else
  constexpr size_t cmsg_alloc_space = 24 /* CMSG_SPACE(sizeof(int)) */;
#endif /* GRPC_LINUX_ERRQUEUE */
  char cmsgbuf[cmsg_alloc_space];
  for (size_t i = 0; i < iov_len; i++) {
    iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  do {
    /* Assume there is something on the queue. If we receive TCP_INQ from the
     * kernel, we will update this value; otherwise, we have to assume there
     * is always something to read until we get EAGAIN. */
    tcp->inq = 1;

    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
    if (tcp->inq_capable) {
      msg.msg_control = cmsgbuf;
      msg.msg_controllen = sizeof(cmsgbuf);
    } else {
      msg.msg_control = nullptr;
      msg.msg_controllen = 0;
    }
    msg.msg_flags = 0;

    GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
    GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);

    do {
      GPR_TIMER_SCOPE("recvmsg", 0);
      GRPC_STATS_INC_SYSCALL_READ();
      read_bytes = recvmsg(tcp->fd, &msg, 0);
    } while (read_bytes < 0 && errno == EINTR);

    /* We have read something in previous reads. We need to deliver those
     * bytes to the upper layer. */
    if (read_bytes <= 0 && total_read_bytes > 0) {
      break;
    }

    if (read_bytes < 0) {
      /* NB: After calling call_read_cb a parallel call of the read handler may
       * run. */
      if (errno == EAGAIN) {
        finish_estimate(tcp);
        /* We've consumed the edge, request a new one */
        notify_on_read(tcp);
      } else {
        grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
        call_read_cb(tcp,
                     tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
        TCP_UNREF(tcp, "read");
      }
      return;
    }

    if (read_bytes == 0) {
      /* 0 read size ==> end of stream
       *
       * We may have read something, i.e., total_read_bytes > 0, but
       * since the connection is closed we will drop the data here, because we
       * can't call the callback multiple times. */
      grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
      call_read_cb(
          tcp, tcp_annotate_error(
                   GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
      TCP_UNREF(tcp, "read");
      return;
    }

    GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
    add_to_estimate(tcp, static_cast<size_t>(read_bytes));
    GPR_DEBUG_ASSERT((size_t)read_bytes <=
                     tcp->incoming_buffer->length - total_read_bytes);

#ifdef GRPC_HAVE_TCP_INQ
    if (tcp->inq_capable) {
      GPR_DEBUG_ASSERT(!(msg.msg_flags & MSG_CTRUNC));
      struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
      for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
            cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
          tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
          break;
        }
      }
    }
#endif /* GRPC_HAVE_TCP_INQ */
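
    /* TCP_INQ: when available, the kernel attaches a TCP_CM_INQ control
     * message to each recvmsg result reporting how many bytes are still
     * queued in the receive buffer. A reported value of 0 lets this loop stop
     * issuing recvmsg calls, and lets a later non-urgent tcp_read wait for
     * POLLIN instead of speculatively reading until EAGAIN. */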
    total_read_bytes += read_bytes;
    if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
      /* We have filled incoming_buffer, and we cannot read any more. */
      break;
    }

    /* We had a partial read, and still have space to read more data.
     * So, adjust IOVs and try to read more. */
    size_t remaining = read_bytes;
    size_t j = 0;
    for (size_t i = 0; i < iov_len; i++) {
      if (remaining >= iov[i].iov_len) {
        remaining -= iov[i].iov_len;
        continue;
      }
      if (remaining > 0) {
        iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
        iov[j].iov_len = iov[i].iov_len - remaining;
        remaining = 0;
      } else {
        iov[j].iov_base = iov[i].iov_base;
        iov[j].iov_len = iov[i].iov_len;
      }
      ++j;
    }
    iov_len = j;
  } while (true);

  finish_estimate(tcp);

  GPR_DEBUG_ASSERT(total_read_bytes > 0);
  if (total_read_bytes < tcp->incoming_buffer->length) {
    grpc_slice_buffer_trim_end(tcp->incoming_buffer,
                               tcp->incoming_buffer->length - total_read_bytes,
                               &tcp->last_read_buffer);
  }
  call_read_cb(tcp, GRPC_ERROR_NONE);
  TCP_UNREF(tcp, "read");
}
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
            grpc_error_string(error));
  }
  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
    call_read_cb(tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(tcp, "read");
  } else {
    tcp_do_read(tcp);
  }
}
static void tcp_continue_read(grpc_tcp* tcp) {
  size_t target_read_size = get_target_read_size(tcp);
  /* Wait for allocation only when there is no buffer left. */
  if (tcp->incoming_buffer->length == 0 &&
      tcp->incoming_buffer->count < MAX_READ_IOVEC) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
    }
    grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
                                    tcp->incoming_buffer);
  } else {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
    }
    tcp_do_read(tcp);
  }
}
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
  }

  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
    call_read_cb(tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(tcp, "read");
  } else {
    tcp_continue_read(tcp);
  }
}
static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
                     grpc_closure* cb, bool urgent) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(tcp->read_cb == nullptr);
  tcp->read_cb = cb;
  tcp->incoming_buffer = incoming_buffer;
  grpc_slice_buffer_reset_and_unref_internal(incoming_buffer);
  grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
  TCP_REF(tcp, "read");
  if (tcp->is_first_read) {
    /* Endpoint read called for the very first time. Register the read
     * callback with the polling engine. */
    tcp->is_first_read = false;
    notify_on_read(tcp);
  } else if (!urgent && tcp->inq == 0) {
    /* Upper layer asked to read more, but we know there is no pending data to
     * read from previous reads. So, wait for POLLIN. */
    notify_on_read(tcp);
  } else {
    /* Not the first time. We may or may not have more bytes available. In any
     * case call tcp->read_done_closure (i.e., tcp_handle_read()), which does
     * the right thing (i.e., calls tcp_do_read(), which either reads the
     * available bytes or calls notify_on_read() to be notified when new bytes
     * become available). */
    GRPC_CLOSURE_SCHED(&tcp->read_done_closure, GRPC_ERROR_NONE);
  }
}
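/* Note the role of the `urgent` flag above: an urgent read skips the
 * inq == 0 shortcut and schedules tcp_handle_read() right away, while a
 * non-urgent read on an empty receive queue simply waits for POLLIN. */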
/* A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
 * of bytes sent. */
ssize_t tcp_send(int fd, const struct msghdr* msg) {
  GPR_TIMER_SCOPE("sendmsg", 1);
  ssize_t sent_length;
  do {
    /* TODO(klempner): Cork if this is a partial write */
    GRPC_STATS_INC_SYSCALL_WRITE();
    sent_length = sendmsg(fd, msg, SENDMSG_FLAGS);
  } while (sent_length < 0 && errno == EINTR);
  return sent_length;
}
/** This is to be called if outgoing_buffer_arg is not null. On Linux
 * platforms, this will call sendmsg with socket options set to collect
 * timestamps inside the kernel. On return, sent_length is set to the return
 * value of the sendmsg call. Returns false if setting the socket options
 * failed. This is not implemented for non-Linux platforms currently, and
 * crashes out. */
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
                                      size_t sending_length,
                                      ssize_t* sent_length);

/** The callback function to be invoked when we get an error on the socket. */
static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error);
#ifdef GRPC_LINUX_ERRQUEUE

static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
                                      size_t sending_length,
                                      ssize_t* sent_length) {
  if (!tcp->socket_ts_enabled) {
    uint32_t opt = grpc_core::kTimestampingSocketOptions;
    if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
                   static_cast<void*>(&opt), sizeof(opt)) != 0) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
        gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
      }
      return false;
    }
    tcp->bytes_counter = -1;
    tcp->socket_ts_enabled = true;
  }
  /* Set control message to indicate that you want timestamps. */
  union {
    char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
    struct cmsghdr align;
  } u;
  cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SO_TIMESTAMPING;
  cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
  *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
      grpc_core::kTimestampingRecordingOptions;
  msg->msg_control = u.cmsg_buf;
  msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));

  /* If there was an error on sendmsg the logic in tcp_flush will handle it. */
  ssize_t length = tcp_send(tcp->fd, msg);
  *sent_length = length;
  /* Only save timestamps if all the bytes were taken by sendmsg. */
  if (sending_length == static_cast<size_t>(length)) {
    gpr_mu_lock(&tcp->tb_mu);
    grpc_core::TracedBuffer::AddNewEntry(
        &tcp->tb_head, static_cast<uint32_t>(tcp->bytes_counter + length),
        tcp->fd, tcp->outgoing_buffer_arg);
    gpr_mu_unlock(&tcp->tb_mu);
    tcp->outgoing_buffer_arg = nullptr;
  }
  return true;
}
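/* Each TracedBuffer entry is keyed by the cumulative byte offset
 * (bytes_counter + length) at which this write ends; when the kernel later
 * reports a timestamp for that offset on the error queue, process_timestamp()
 * below can match it back to the outgoing_buffer_arg supplied by the caller. */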
/** Reads \a cmsg to derive timestamps from the control messages. If a valid
 * timestamp is found, the traced buffer list is updated with this timestamp.
 * The caller of this function should be looping on the control messages found
 * in \a msg. \a cmsg should point to the control message that the caller
 * wants processed. On return, a pointer to a control message is returned. On
 * the next iteration, CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg. */
struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
                                  struct cmsghdr* cmsg) {
  auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
  cmsghdr* opt_stats = nullptr;
  if (next_cmsg == nullptr) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_ERROR, "Received timestamp without extended error");
    }
    return cmsg;
  }

  /* Check if next_cmsg is an OPT_STATS msg */
  if (next_cmsg->cmsg_level == SOL_SOCKET &&
      next_cmsg->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
    opt_stats = next_cmsg;
    next_cmsg = CMSG_NXTHDR(msg, opt_stats);
    if (next_cmsg == nullptr) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
        gpr_log(GPR_ERROR, "Received timestamp without extended error");
      }
      return opt_stats;
    }
  }

  if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
      !(next_cmsg->cmsg_type == IP_RECVERR ||
        next_cmsg->cmsg_type == IPV6_RECVERR)) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_ERROR, "Unexpected control message");
    }
    return cmsg;
  }

  auto tss =
      reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
  auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
  if (serr->ee_errno != ENOMSG ||
      serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
    gpr_log(GPR_ERROR, "Unexpected control message");
    return cmsg;
  }
  /* The error handling can potentially be done on another thread, so we need
   * to protect the traced buffer list. A lock-free list might be better;
   * using a simple mutex for now. */
  gpr_mu_lock(&tcp->tb_mu);
  grpc_core::TracedBuffer::ProcessTimestamp(&tcp->tb_head, serr, opt_stats,
                                            tss);
  gpr_mu_unlock(&tcp->tb_mu);
  return next_cmsg;
}
/** For Linux platforms, reads the socket's error queue and processes error
 * messages from the queue. */
static void process_errors(grpc_tcp* tcp) {
  while (true) {
    struct iovec iov;
    iov.iov_base = nullptr;
    iov.iov_len = 0;
    struct msghdr msg;
    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 0;
    msg.msg_flags = 0;
    /* Allocate enough space so we don't need to keep increasing this as the
     * size of OPT_STATS increases. */
    constexpr size_t cmsg_alloc_space =
        CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
        CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
        CMSG_SPACE(32 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)));
    /* Allocate aligned space for cmsgs received along with timestamps */
    union {
      char rbuf[cmsg_alloc_space];
      struct cmsghdr align;
    } aligned_buf;
    memset(&aligned_buf, 0, sizeof(aligned_buf));
    msg.msg_control = aligned_buf.rbuf;
    msg.msg_controllen = sizeof(aligned_buf.rbuf);

    int r, saved_errno;
    do {
      r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
      saved_errno = errno;
    } while (r < 0 && saved_errno == EINTR);

    if (r == -1 && saved_errno == EAGAIN) {
      return; /* No more errors to process */
    }
    if (r == -1) {
      return;
    }
    if ((msg.msg_flags & MSG_CTRUNC) != 0) {
      gpr_log(GPR_ERROR, "Error message was truncated.");
    }
    if (msg.msg_controllen == 0) {
      /* There was no control message found. It was probably spurious. */
      return;
    }
    bool seen = false;
    for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
         cmsg = CMSG_NXTHDR(&msg, cmsg)) {
      if (cmsg->cmsg_level != SOL_SOCKET ||
          cmsg->cmsg_type != SCM_TIMESTAMPING) {
        /* Got a control message that is not a timestamp. Don't know how to
         * handle it. */
        if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
          gpr_log(GPR_INFO,
                  "unknown control message cmsg_level:%d cmsg_type:%d",
                  cmsg->cmsg_level, cmsg->cmsg_type);
        }
        return;
      }
      cmsg = process_timestamp(tcp, &msg, cmsg);
      seen = true;
    }
    if (!seen) {
      return;
    }
  }
}
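/* Each pass of the loop above drains one message from the error queue with
 * recvmsg(..., MSG_ERRQUEUE) and walks its control messages; it only exits
 * once recvmsg reports EAGAIN (nothing left), fails, or an unexpected control
 * message shows up. */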
static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp, grpc_error_string(error));
  }

  if (error != GRPC_ERROR_NONE ||
      static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
    /* We aren't going to register to hear on errors anymore, so it is safe to
     * unref. */
    TCP_UNREF(tcp, "error-tracking");
    return;
  }

  /* We are still interested in collecting timestamps, so let's try reading
   * them. */
  process_errors(tcp);
  /* This might not be a timestamps error. Set the read and write closures to
   * be ready. */
  grpc_fd_set_readable(tcp->em_fd);
  grpc_fd_set_writable(tcp->em_fd);
  grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
}
#else /* GRPC_LINUX_ERRQUEUE */
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
                                      size_t sending_length,
                                      ssize_t* sent_length) {
  gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
  GPR_ASSERT(0);
  return false;
}

static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
  gpr_log(GPR_ERROR, "Error handling is not supported for this platform");
  GPR_ASSERT(0);
}
#endif /* GRPC_LINUX_ERRQUEUE */
/* If outgoing_buffer_arg is filled, shuts down the list early, so that any
 * release operations needed can be performed on the arg. */
void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
  if (tcp->outgoing_buffer_arg) {
    gpr_mu_lock(&tcp->tb_mu);
    grpc_core::TracedBuffer::Shutdown(
        &tcp->tb_head, tcp->outgoing_buffer_arg,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("TracedBuffer list shutdown"));
    gpr_mu_unlock(&tcp->tb_mu);
    tcp->outgoing_buffer_arg = nullptr;
  }
}
/* returns true if done, false if pending; if returning true, *error is set */
#if defined(IOV_MAX) && IOV_MAX < 1000
#define MAX_WRITE_IOVEC IOV_MAX
#else
#define MAX_WRITE_IOVEC 1000
#endif
static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
  struct msghdr msg;
  struct iovec iov[MAX_WRITE_IOVEC];
  msg_iovlen_type iov_size;
  ssize_t sent_length = 0;
  size_t sending_length;
  size_t trailing;
  size_t unwind_slice_idx;
  size_t unwind_byte_idx;

  // We always start at zero, because we eagerly unref and trim the slice
  // buffer as we write
  size_t outgoing_slice_idx = 0;

  for (;;) {
    sending_length = 0;
    unwind_slice_idx = outgoing_slice_idx;
    unwind_byte_idx = tcp->outgoing_byte_idx;
    for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
                       iov_size != MAX_WRITE_IOVEC;
         iov_size++) {
      iov[iov_size].iov_base =
          GRPC_SLICE_START_PTR(
              tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
          tcp->outgoing_byte_idx;
      iov[iov_size].iov_len =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
          tcp->outgoing_byte_idx;
      sending_length += iov[iov_size].iov_len;
      outgoing_slice_idx++;
      tcp->outgoing_byte_idx = 0;
    }
    GPR_ASSERT(iov_size > 0);

    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;

    bool tried_sending_message = false;
    if (tcp->outgoing_buffer_arg != nullptr) {
      if (!tcp->ts_capable ||
          !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length)) {
        /* We could not set socket options to collect Fathom timestamps.
         * Fall back to writing without timestamps. */
        tcp->ts_capable = false;
        tcp_shutdown_buffer_list(tcp);
      } else {
        tried_sending_message = true;
      }
    }
    if (!tried_sending_message) {
      msg.msg_control = nullptr;
      msg.msg_controllen = 0;

      GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
      GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);

      sent_length = tcp_send(tcp->fd, &msg);
    }

    if (sent_length < 0) {
      if (errno == EAGAIN) {
        tcp->outgoing_byte_idx = unwind_byte_idx;
        // unref all and forget about all slices that have already been
        // written to this connection
        for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
          grpc_slice_buffer_remove_first(tcp->outgoing_buffer);
        }
        return false;
      } else if (errno == EPIPE) {
        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      } else {
        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      }
    }

    GPR_ASSERT(tcp->outgoing_byte_idx == 0);
    tcp->bytes_counter += sent_length;
    trailing = sending_length - static_cast<size_t>(sent_length);
    while (trailing > 0) {
      size_t slice_length;
      outgoing_slice_idx--;
      slice_length =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
      if (slice_length > trailing) {
        tcp->outgoing_byte_idx = slice_length - trailing;
        break;
      } else {
        trailing -= slice_length;
      }
    }
    if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
      *error = GRPC_ERROR_NONE;
      grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
      return true;
    }
  }
}
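/* Unwind example: if the gathered iovecs cover 3 slices totalling 10000 bytes
 * but sendmsg accepts only 6000, `trailing` is 4000; the loop above walks
 * backwards from the last gathered slice, subtracting slice lengths until it
 * finds the slice where the send stopped, and records the resume offset in
 * tcp->outgoing_byte_idx so the next flush starts exactly where the kernel
 * left off. */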
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  grpc_closure* cb;

  if (error != GRPC_ERROR_NONE) {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_REF(error));
    TCP_UNREF(tcp, "write");
    return;
  }

  if (!tcp_flush(tcp, &error)) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "write: delayed");
    }
    notify_on_write(tcp);
    // tcp_flush does not populate error if it has returned false.
    GPR_DEBUG_ASSERT(error == GRPC_ERROR_NONE);
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      const char* str = grpc_error_string(error);
      gpr_log(GPR_INFO, "write: %s", str);
    }
    // No need to take a ref on error since tcp_flush provides a ref.
    GRPC_CLOSURE_SCHED(cb, error);
    TCP_UNREF(tcp, "write");
  }
}
static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
                      grpc_closure* cb, void* arg) {
  GPR_TIMER_SCOPE("tcp_write", 0);
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_error* error = GRPC_ERROR_NONE;

  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    size_t i;

    for (i = 0; i < buf->count; i++) {
      gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string);
      if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
        char* data =
            grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
        gpr_log(GPR_DEBUG, "DATA: %s", data);
        gpr_free(data);
      }
    }
  }

  GPR_ASSERT(tcp->write_cb == nullptr);

  tcp->outgoing_buffer_arg = arg;
  if (buf->length == 0) {
    GRPC_CLOSURE_SCHED(
        cb, grpc_fd_is_shutdown(tcp->em_fd)
                ? tcp_annotate_error(
                      GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp)
                : GRPC_ERROR_NONE);
    tcp_shutdown_buffer_list(tcp);
    return;
  }
  tcp->outgoing_buffer = buf;
  tcp->outgoing_byte_idx = 0;
  if (arg) {
    GPR_ASSERT(grpc_event_engine_can_track_errors());
  }

  if (!tcp_flush(tcp, &error)) {
    TCP_REF(tcp, "write");
    tcp->write_cb = cb;
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "write: delayed");
    }
    notify_on_write(tcp);
  } else {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      const char* str = grpc_error_string(error);
      gpr_log(GPR_INFO, "write: %s", str);
    }
    GRPC_CLOSURE_SCHED(cb, error);
  }
}
static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_add_fd(pollset, tcp->em_fd);
}

static void tcp_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
}

static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
}

static char* tcp_get_peer(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return gpr_strdup(tcp->peer_string);
}

static int tcp_get_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->fd;
}

static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->resource_user;
}

static bool tcp_can_track_err(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  if (!grpc_event_engine_can_track_errors()) {
    return false;
  }
  struct sockaddr addr;
  socklen_t len = sizeof(addr);
  if (getsockname(tcp->fd, &addr, &len) < 0) {
    return false;
  }
  if (addr.sa_family == AF_INET || addr.sa_family == AF_INET6) {
    return true;
  }
  return false;
}

static const grpc_endpoint_vtable vtable = {tcp_read,
                                            tcp_write,
                                            tcp_add_to_pollset,
                                            tcp_add_to_pollset_set,
                                            tcp_delete_from_pollset_set,
                                            tcp_shutdown,
                                            tcp_destroy,
                                            tcp_get_resource_user,
                                            tcp_get_peer,
                                            tcp_get_fd,
                                            tcp_can_track_err};
#define MAX_CHUNK_SIZE 32 * 1024 * 1024

grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
                               const grpc_channel_args* channel_args,
                               const char* peer_string) {
  int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
  int tcp_max_read_chunk_size = 4 * 1024 * 1024;
  int tcp_min_read_chunk_size = 256;
  grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
  if (channel_args != nullptr) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_min_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_max_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 ==
                 strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(resource_quota);
        resource_quota =
            grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
                channel_args->args[i].value.pointer.p));
      }
    }
  }

  if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) {
    tcp_min_read_chunk_size = tcp_max_read_chunk_size;
  }
  tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
                                  tcp_max_read_chunk_size);
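  /* Example: with channel args GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE=8192 and
   * GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE=4096, the min is first lowered to the
   * max (4096) and the initial read chunk size is then clamped into
   * [4096, 4096]; the per-endpoint target_length later adapts between these
   * bounds via finish_estimate(). */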
  grpc_tcp* tcp = static_cast<grpc_tcp*>(gpr_malloc(sizeof(grpc_tcp)));
  tcp->base.vtable = &vtable;
  tcp->peer_string = gpr_strdup(peer_string);
  tcp->fd = grpc_fd_wrapped_fd(em_fd);
  tcp->read_cb = nullptr;
  tcp->write_cb = nullptr;
  tcp->release_fd_cb = nullptr;
  tcp->release_fd = nullptr;
  tcp->incoming_buffer = nullptr;
  tcp->target_length = static_cast<double>(tcp_read_chunk_size);
  tcp->min_read_chunk_size = tcp_min_read_chunk_size;
  tcp->max_read_chunk_size = tcp_max_read_chunk_size;
  tcp->bytes_read_this_round = 0;
  /* Will be set to false by the very first endpoint read function */
  tcp->is_first_read = true;
  tcp->bytes_counter = -1;
  tcp->socket_ts_enabled = false;
  tcp->ts_capable = true;
  tcp->outgoing_buffer_arg = nullptr;
  /* paired with unref in grpc_tcp_destroy */
  new (&tcp->refcount) grpc_core::RefCount(1, &grpc_tcp_trace);
  gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
  tcp->em_fd = em_fd;
  grpc_slice_buffer_init(&tcp->last_read_buffer);
  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
  grpc_resource_user_slice_allocator_init(
      &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
  grpc_resource_quota_unref_internal(resource_quota);
  gpr_mu_init(&tcp->tb_mu);
  tcp->tb_head = nullptr;
  GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
                    grpc_schedule_on_exec_ctx);
  if (grpc_event_engine_run_in_background()) {
    // If there is a polling engine always running in the background, there is
    // no need to run the backup poller.
    GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
                      grpc_schedule_on_exec_ctx);
  } else {
    GRPC_CLOSURE_INIT(&tcp->write_done_closure,
                      tcp_drop_uncovered_then_handle_write, tcp,
                      grpc_schedule_on_exec_ctx);
  }
  /* Always assume there is something on the queue to read. */
  tcp->inq = 1;
#ifdef GRPC_HAVE_TCP_INQ
  int one = 1;
  if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
    tcp->inq_capable = true;
  } else {
    gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
    tcp->inq_capable = false;
  }
#else
  tcp->inq_capable = false;
#endif /* GRPC_HAVE_TCP_INQ */
  /* Start being notified on errors if the event engine can track errors. */
  if (grpc_event_engine_can_track_errors()) {
    /* Grab a ref to tcp so that we can safely access the tcp struct when
     * processing errors. We unref when we no longer want to track errors
     * separately. */
    TCP_REF(tcp, "error-tracking");
    gpr_atm_rel_store(&tcp->stop_error_notification, 0);
    GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
  }

  return &tcp->base;
}
int grpc_tcp_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(ep->vtable == &vtable);
  return grpc_fd_wrapped_fd(tcp->em_fd);
}

void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
                                     grpc_closure* done) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(ep->vtable == &vtable);
  tcp->release_fd = fd;
  tcp->release_fd_cb = done;
  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
  if (grpc_event_engine_can_track_errors()) {
    /* Stop error notifications. */
    gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
    grpc_fd_set_error(tcp->em_fd);
  }
  TCP_UNREF(tcp, "destroy");
}

#endif /* GRPC_POSIX_SOCKET_TCP */