Built motion from commit 6a09e18b.|2.6.11
[motion2.git] / legacy-libs / grpc-cloned / deps / grpc / src / core / lib / surface / server.cc
1 /*
2  *
3  * Copyright 2015-2016 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18
19 #include <grpc/support/port_platform.h>
20
21 #include "src/core/lib/surface/server.h"
22
23 #include <limits.h>
24 #include <stdlib.h>
25 #include <string.h>
26
27 #include <grpc/support/alloc.h>
28 #include <grpc/support/log.h>
29 #include <grpc/support/string_util.h>
30
31 #include <utility>
32
33 #include "src/core/lib/channel/channel_args.h"
34 #include "src/core/lib/channel/channelz.h"
35 #include "src/core/lib/channel/connected_channel.h"
36 #include "src/core/lib/debug/stats.h"
37 #include "src/core/lib/gpr/mpscq.h"
38 #include "src/core/lib/gpr/spinlock.h"
39 #include "src/core/lib/gpr/string.h"
40 #include "src/core/lib/iomgr/executor.h"
41 #include "src/core/lib/iomgr/iomgr.h"
42 #include "src/core/lib/slice/slice_internal.h"
43 #include "src/core/lib/surface/api_trace.h"
44 #include "src/core/lib/surface/call.h"
45 #include "src/core/lib/surface/channel.h"
46 #include "src/core/lib/surface/completion_queue.h"
47 #include "src/core/lib/surface/init.h"
48 #include "src/core/lib/transport/metadata.h"
49 #include "src/core/lib/transport/static_metadata.h"
50
51 grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel");
52
53 static void server_on_recv_initial_metadata(void* ptr, grpc_error* error);
54 static void server_recv_trailing_metadata_ready(void* user_data,
55                                                 grpc_error* error);
56
namespace {
/* A listener registered with the server.  The server starts each listener
   once (handing it the server's pollsets) and destroys it during shutdown,
   signalling completion via destroy_done. */
struct listener {
  void* arg;
  void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
                size_t pollset_count);
  void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
  struct listener* next;
  intptr_t socket_uuid;
  grpc_closure destroy_done;
};

/* How the application requested a call: grpc_server_request_call
   (BATCH_CALL) or grpc_server_request_registered_call (REGISTERED_CALL). */
enum requested_call_type { BATCH_CALL, REGISTERED_CALL };

struct registered_method;

/* One outstanding application request for an incoming RPC; queued on a
   request_matcher until an incoming call can be matched to it. */
struct requested_call {
  gpr_mpscq_node request_link; /* must be first */
  requested_call_type type;
  size_t cq_idx;
  void* tag;
  grpc_server* server;
  grpc_completion_queue* cq_bound_to_call;
  grpc_call** call;
  grpc_cq_completion completion;
  grpc_metadata_array* initial_metadata;
  /* out-params that depend on the request type; see publish_call */
  union {
    struct {
      grpc_call_details* details;
    } batch;
    struct {
      registered_method* method;
      gpr_timespec* deadline;
      grpc_byte_buffer** optional_payload;
    } registered;
  } data;
};

/* Per-channel entry for a registered method; stored in the channel's
   open-addressed hash table (see start_new_rpc's probing loops). */
struct channel_registered_method {
  registered_method* server_registered_method;
  uint32_t flags;
  bool has_host;
  grpc_slice method;
  grpc_slice host;
};

/* Per-channel data for the server filter. */
struct channel_data {
  grpc_server* server;
  grpc_connectivity_state connectivity_state;
  grpc_channel* channel;
  size_t cq_idx;
  /* linked list of all channels on a server */
  channel_data* next;
  channel_data* prev;
  /* hash table of channel_registered_method; may be null */
  channel_registered_method* registered_methods;
  uint32_t registered_method_slots;
  uint32_t registered_method_max_probes;
  grpc_closure finish_destroy_channel_closure;
  grpc_closure channel_connectivity_changed;
  intptr_t channelz_socket_uuid;
};

/* A (tag, cq) pair to notify when server shutdown completes. */
typedef struct shutdown_tag {
  void* tag;
  grpc_completion_queue* cq;
  grpc_cq_completion completion;
} shutdown_tag;

typedef enum {
  /* waiting for metadata */
  NOT_STARTED,
  /* initial metadata read, not flow controlled in yet */
  PENDING,
  /* flow controlled in, on completion queue */
  ACTIVATED,
  /* cancelled before being queued */
  ZOMBIED
} call_state;

typedef struct request_matcher request_matcher;

/* Per-call data for the server filter (top element of server call stacks). */
struct call_data {
  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
      : call(grpc_call_from_top_element(elem)),
        call_combiner(args.call_combiner) {
    GRPC_CLOSURE_INIT(&server_on_recv_initial_metadata,
                      ::server_on_recv_initial_metadata, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
                      server_recv_trailing_metadata_ready, elem,
                      grpc_schedule_on_exec_ctx);
  }
  ~call_data() {
    /* A PENDING call is still linked on a request_matcher's pending list;
       destroying it now would leave a dangling pointer there. */
    GPR_ASSERT(state != PENDING);
    GRPC_ERROR_UNREF(recv_initial_metadata_error);
    if (host_set) {
      grpc_slice_unref_internal(host);
    }
    if (path_set) {
      grpc_slice_unref_internal(path);
    }
    grpc_metadata_array_destroy(&initial_metadata);
    grpc_byte_buffer_destroy(payload);
  }

  grpc_call* call;

  /* holds a call_state value; accessed with gpr_atm operations */
  gpr_atm state = NOT_STARTED;

  bool path_set = false;
  bool host_set = false;
  grpc_slice path;
  grpc_slice host;
  grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;

  /* cq the call was published to (set in publish_call) */
  grpc_completion_queue* cq_new = nullptr;

  grpc_metadata_batch* recv_initial_metadata = nullptr;
  uint32_t recv_initial_metadata_flags = 0;
  grpc_metadata_array initial_metadata =
      grpc_metadata_array();  // Zero-initialize the C struct.

  request_matcher* matcher = nullptr;
  grpc_byte_buffer* payload = nullptr;

  grpc_closure got_initial_metadata;
  grpc_closure server_on_recv_initial_metadata;
  grpc_closure kill_zombie_closure;
  grpc_closure* on_done_recv_initial_metadata;
  grpc_closure recv_trailing_metadata_ready;
  grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
  grpc_closure* original_recv_trailing_metadata_ready;
  grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
  bool seen_recv_trailing_metadata_ready = false;

  grpc_closure publish;

  /* next call on a request_matcher's pending list (while state == PENDING) */
  call_data* pending_next = nullptr;
  grpc_core::CallCombiner* call_combiner;
};

/* Matches incoming calls against queued requested_calls; one instance per
   registered method plus one for unregistered methods. */
struct request_matcher {
  grpc_server* server;
  call_data* pending_head;
  call_data* pending_tail;
  /* one lock-free queue of requested_calls per server cq */
  gpr_locked_mpscq* requests_per_cq;
};

/* A method registered via grpc_server_register_method. */
struct registered_method {
  char* method;
  char* host;
  grpc_server_register_method_payload_handling payload_handling;
  uint32_t flags;
  /* one request matcher per method */
  request_matcher matcher;
  registered_method* next;
};

/* Snapshot of all live channels, used to broadcast shutdown to each. */
typedef struct {
  grpc_channel** channels;
  size_t num_channels;
} channel_broadcaster;
}  // namespace
219
struct grpc_server {
  grpc_channel_args* channel_args;

  grpc_resource_user* default_resource_user;

  /* registered completion queues and the pollsets extracted from them */
  grpc_completion_queue** cqs;
  grpc_pollset** pollsets;
  size_t cq_count;
  size_t pollset_count;
  bool started;

  /* The two following mutexes control access to server-state
     mu_global controls access to non-call-related state (e.g., channel state)
     mu_call controls access to call-related state (e.g., the call lists)

     If they are ever required to be nested, you must lock mu_global
     before mu_call. This is currently used in shutdown processing
     (grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
  gpr_mu mu_global; /* mutex for server and channel state */
  gpr_mu mu_call;   /* mutex for call-specific state */

  /* startup synchronization: flag is protected by mu_global, signals whether
     we are doing the listener start routine or not */
  bool starting;
  gpr_cv starting_cv;

  registered_method* registered_methods;
  /** one request matcher for unregistered methods */
  request_matcher unregistered_request_matcher;

  /* set once shutdown begins; read with gpr_atm_acq_load on hot paths */
  gpr_atm shutdown_flag;
  uint8_t shutdown_published;
  size_t num_shutdown_tags;
  shutdown_tag* shutdown_tags;

  /* sentinel head of the doubly-linked list of channels */
  channel_data root_channel_data;

  listener* listeners;
  int listeners_destroyed;
  grpc_core::RefCount internal_refcount;

  /** when did we print the last shutdown progress message */
  gpr_timespec last_shutdown_message_time;

  grpc_core::RefCountedPtr<grpc_core::channelz::ServerNode> channelz_server;
};
266
267 #define SERVER_FROM_CALL_ELEM(elem) \
268   (((channel_data*)(elem)->channel_data)->server)
269
270 static void publish_new_rpc(void* calld, grpc_error* error);
271 static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,
272                       grpc_error* error);
273 /* Before calling maybe_finish_shutdown, we must hold mu_global and not
274    hold mu_call */
275 static void maybe_finish_shutdown(grpc_server* server);
276
277 /*
278  * channel broadcaster
279  */
280
281 /* assumes server locked */
282 static void channel_broadcaster_init(grpc_server* s, channel_broadcaster* cb) {
283   channel_data* c;
284   size_t count = 0;
285   for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
286     count++;
287   }
288   cb->num_channels = count;
289   cb->channels = static_cast<grpc_channel**>(
290       gpr_malloc(sizeof(*cb->channels) * cb->num_channels));
291   count = 0;
292   for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) {
293     cb->channels[count++] = c->channel;
294     GRPC_CHANNEL_INTERNAL_REF(c->channel, "broadcast");
295   }
296 }
297
/* Heap-allocated state for send_shutdown; freed by shutdown_cleanup once
   the transport op completes. */
struct shutdown_cleanup_args {
  grpc_closure closure;
  grpc_slice slice;
};
302
303 static void shutdown_cleanup(void* arg, grpc_error* error) {
304   struct shutdown_cleanup_args* a =
305       static_cast<struct shutdown_cleanup_args*>(arg);
306   grpc_slice_unref_internal(a->slice);
307   gpr_free(a);
308 }
309
310 static void send_shutdown(grpc_channel* channel, bool send_goaway,
311                           grpc_error* send_disconnect) {
312   struct shutdown_cleanup_args* sc =
313       static_cast<struct shutdown_cleanup_args*>(gpr_malloc(sizeof(*sc)));
314   GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
315                     grpc_schedule_on_exec_ctx);
316   grpc_transport_op* op = grpc_make_transport_op(&sc->closure);
317   grpc_channel_element* elem;
318
319   op->goaway_error =
320       send_goaway ? grpc_error_set_int(
321                         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"),
322                         GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK)
323                   : GRPC_ERROR_NONE;
324   op->set_accept_stream = true;
325   sc->slice = grpc_slice_from_copied_string("Server shutdown");
326   op->disconnect_with_error = send_disconnect;
327
328   elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
329   elem->filter->start_transport_op(elem, op);
330 }
331
332 static void channel_broadcaster_shutdown(channel_broadcaster* cb,
333                                          bool send_goaway,
334                                          grpc_error* force_disconnect) {
335   size_t i;
336
337   for (i = 0; i < cb->num_channels; i++) {
338     send_shutdown(cb->channels[i], send_goaway,
339                   GRPC_ERROR_REF(force_disconnect));
340     GRPC_CHANNEL_INTERNAL_UNREF(cb->channels[i], "broadcast");
341   }
342   gpr_free(cb->channels);
343   GRPC_ERROR_UNREF(force_disconnect);
344 }
345
346 /*
347  * request_matcher
348  */
349
350 static void request_matcher_init(request_matcher* rm, grpc_server* server) {
351   rm->server = server;
352   rm->pending_head = rm->pending_tail = nullptr;
353   rm->requests_per_cq = static_cast<gpr_locked_mpscq*>(
354       gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count));
355   for (size_t i = 0; i < server->cq_count; i++) {
356     gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
357   }
358 }
359
360 static void request_matcher_destroy(request_matcher* rm) {
361   for (size_t i = 0; i < rm->server->cq_count; i++) {
362     GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == nullptr);
363     gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
364   }
365   gpr_free(rm->requests_per_cq);
366 }
367
368 static void kill_zombie(void* elem, grpc_error* error) {
369   grpc_call_unref(
370       grpc_call_from_top_element(static_cast<grpc_call_element*>(elem)));
371 }
372
373 static void request_matcher_zombify_all_pending_calls(request_matcher* rm) {
374   while (rm->pending_head) {
375     call_data* calld = rm->pending_head;
376     rm->pending_head = calld->pending_next;
377     gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
378     GRPC_CLOSURE_INIT(
379         &calld->kill_zombie_closure, kill_zombie,
380         grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
381         grpc_schedule_on_exec_ctx);
382     GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
383   }
384 }
385
386 static void request_matcher_kill_requests(grpc_server* server,
387                                           request_matcher* rm,
388                                           grpc_error* error) {
389   requested_call* rc;
390   for (size_t i = 0; i < server->cq_count; i++) {
391     while ((rc = reinterpret_cast<requested_call*>(
392                 gpr_locked_mpscq_pop(&rm->requests_per_cq[i]))) != nullptr) {
393       fail_call(server, i, rc, GRPC_ERROR_REF(error));
394     }
395   }
396   GRPC_ERROR_UNREF(error);
397 }
398
399 /*
400  * server proper
401  */
402
403 static void server_ref(grpc_server* server) { server->internal_refcount.Ref(); }
404
/* Final destruction of the server, run when the internal refcount reaches
   zero (see server_unref).  Frees all registered methods, matchers, cq refs
   and the server itself. */
static void server_delete(grpc_server* server) {
  registered_method* rm;
  size_t i;
  server->channelz_server.reset();
  grpc_channel_args_destroy(server->channel_args);
  gpr_mu_destroy(&server->mu_global);
  gpr_mu_destroy(&server->mu_call);
  gpr_cv_destroy(&server->starting_cv);
  while ((rm = server->registered_methods) != nullptr) {
    server->registered_methods = rm->next;
    /* matchers are only initialized if the server actually started */
    if (server->started) {
      request_matcher_destroy(&rm->matcher);
    }
    gpr_free(rm->method);
    gpr_free(rm->host);
    gpr_free(rm);
  }
  if (server->started) {
    request_matcher_destroy(&server->unregistered_request_matcher);
  }
  for (i = 0; i < server->cq_count; i++) {
    GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
  }
  gpr_free(server->cqs);
  gpr_free(server->pollsets);
  gpr_free(server->shutdown_tags);
  gpr_free(server);
}
433
434 static void server_unref(grpc_server* server) {
435   if (GPR_UNLIKELY(server->internal_refcount.Unref())) {
436     server_delete(server);
437   }
438 }
439
440 static int is_channel_orphaned(channel_data* chand) {
441   return chand->next == chand;
442 }
443
444 static void orphan_channel(channel_data* chand) {
445   chand->next->prev = chand->prev;
446   chand->prev->next = chand->next;
447   chand->next = chand->prev = chand;
448 }
449
450 static void finish_destroy_channel(void* cd, grpc_error* error) {
451   channel_data* chand = static_cast<channel_data*>(cd);
452   grpc_server* server = chand->server;
453   GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
454   server_unref(server);
455 }
456
/* Begin tearing down a channel: unlink it from the server, possibly finish
   a pending server shutdown, then ask the transport to stop accepting
   streams.  finish_destroy_channel runs when the transport op completes.
   Takes ownership of |error| (used only for logging). */
static void destroy_channel(channel_data* chand, grpc_error* error) {
  /* already unlinked => teardown is in progress or done */
  if (is_channel_orphaned(chand)) return;
  GPR_ASSERT(chand->server != nullptr);
  orphan_channel(chand);
  /* hold the server alive until finish_destroy_channel */
  server_ref(chand->server);
  maybe_finish_shutdown(chand->server);
  GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure,
                    finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_server_channel_trace) &&
      error != GRPC_ERROR_NONE) {
    const char* msg = grpc_error_string(error);
    gpr_log(GPR_INFO, "Disconnected client: %s", msg);
  }
  GRPC_ERROR_UNREF(error);

  grpc_transport_op* op =
      grpc_make_transport_op(&chand->finish_destroy_channel_closure);
  /* set_accept_stream with no callback clears the accept-stream hook */
  op->set_accept_stream = true;
  grpc_channel_next_op(grpc_channel_stack_element(
                           grpc_channel_get_channel_stack(chand->channel), 0),
                       op);
}
480
481 static void done_request_event(void* req, grpc_cq_completion* c) {
482   gpr_free(req);
483 }
484
/* Hand a matched call to the application: bind the call to the cq the
   request asked for, fill in the request's out-params, and post the
   completion.  Ownership of |rc| passes to done_request_event. */
static void publish_call(grpc_server* server, call_data* calld, size_t cq_idx,
                         requested_call* rc) {
  grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call);
  grpc_call* call = calld->call;
  *rc->call = call;
  calld->cq_new = server->cqs[cq_idx];
  /* move the received initial metadata into the application's array */
  GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata);
  switch (rc->type) {
    case BATCH_CALL:
      GPR_ASSERT(calld->host_set);
      GPR_ASSERT(calld->path_set);
      rc->data.batch.details->host = grpc_slice_ref_internal(calld->host);
      rc->data.batch.details->method = grpc_slice_ref_internal(calld->path);
      rc->data.batch.details->deadline =
          grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
      rc->data.batch.details->flags = calld->recv_initial_metadata_flags;
      break;
    case REGISTERED_CALL:
      *rc->data.registered.deadline =
          grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC);
      if (rc->data.registered.optional_payload) {
        /* ownership of the payload buffer moves to the application */
        *rc->data.registered.optional_payload = calld->payload;
        calld->payload = nullptr;
      }
      break;
    default:
      GPR_UNREACHABLE_CODE(return );
  }

  grpc_cq_end_op(calld->cq_new, rc->tag, GRPC_ERROR_NONE, done_request_event,
                 rc, &rc->completion, true);
}
517
/* Try to match an incoming call (whose metadata — and payload, if requested —
   is now available) against a queued application request.  Fast path: pop
   from the lock-free per-cq queues.  Slow path: re-check under mu_call and,
   if still nothing, park the call on the matcher's pending list. */
static void publish_new_rpc(void* arg, grpc_error* error) {
  grpc_call_element* call_elem = static_cast<grpc_call_element*>(arg);
  call_data* calld = static_cast<call_data*>(call_elem->call_data);
  channel_data* chand = static_cast<channel_data*>(call_elem->channel_data);
  request_matcher* rm = calld->matcher;
  grpc_server* server = rm->server;

  if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) {
    /* upstream failure or server shutdown: zombify the call */
    gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
    GRPC_CLOSURE_INIT(
        &calld->kill_zombie_closure, kill_zombie,
        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
        grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_REF(error));
    return;
  }

  /* fast path: lock-free try_pop, starting at this channel's home cq */
  for (size_t i = 0; i < server->cq_count; i++) {
    size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
    requested_call* rc = reinterpret_cast<requested_call*>(
        gpr_locked_mpscq_try_pop(&rm->requests_per_cq[cq_idx]));
    if (rc == nullptr) {
      continue;
    } else {
      GRPC_STATS_INC_SERVER_CQS_CHECKED(i);
      gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
      publish_call(server, calld, cq_idx, rc);
      return; /* early out */
    }
  }

  /* no cq to take the request found: queue it on the slow list */
  GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED();
  gpr_mu_lock(&server->mu_call);

  // We need to ensure that all the queues are empty.  We do this under
  // the server mu_call lock to ensure that if something is added to
  // an empty request queue, it will block until the call is actually
  // added to the pending list.
  for (size_t i = 0; i < server->cq_count; i++) {
    size_t cq_idx = (chand->cq_idx + i) % server->cq_count;
    requested_call* rc = reinterpret_cast<requested_call*>(
        gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
    if (rc == nullptr) {
      continue;
    } else {
      gpr_mu_unlock(&server->mu_call);
      GRPC_STATS_INC_SERVER_CQS_CHECKED(i + server->cq_count);
      gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
      publish_call(server, calld, cq_idx, rc);
      return; /* early out */
    }
  }

  /* park the call on the pending list until a request arrives */
  gpr_atm_no_barrier_store(&calld->state, PENDING);
  if (rm->pending_head == nullptr) {
    rm->pending_tail = rm->pending_head = calld;
  } else {
    rm->pending_tail->pending_next = calld;
    rm->pending_tail = calld;
  }
  calld->pending_next = nullptr;
  gpr_mu_unlock(&server->mu_call);
}
582
/* Second stage of starting an RPC, once the target matcher is known.
   Either publishes immediately (no payload wanted) or first reads the
   message payload and publishes from the closure. */
static void finish_start_new_rpc(
    grpc_server* server, grpc_call_element* elem, request_matcher* rm,
    grpc_server_register_method_payload_handling payload_handling) {
  call_data* calld = static_cast<call_data*>(elem->call_data);

  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    /* server is shutting down: zombify the call instead of publishing */
    gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
    GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
    return;
  }

  calld->matcher = rm;

  switch (payload_handling) {
    case GRPC_SRM_PAYLOAD_NONE:
      publish_new_rpc(elem, GRPC_ERROR_NONE);
      break;
    case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
      /* receive the first message before publishing the call */
      grpc_op op;
      op.op = GRPC_OP_RECV_MESSAGE;
      op.flags = 0;
      op.reserved = nullptr;
      op.data.recv_message.recv_message = &calld->payload;
      GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem,
                        grpc_schedule_on_exec_ctx);
      grpc_call_start_batch_and_execute(calld->call, &op, 1, &calld->publish);
      break;
    }
  }
}
615
/* Route an incoming call to the right request matcher.  Probes the channel's
   open-addressed hash table twice — first for an exact (host, method) match,
   then for a host-wildcard method — and falls back to the unregistered
   matcher if neither hits. */
static void start_new_rpc(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  grpc_server* server = chand->server;
  uint32_t i;
  uint32_t hash;
  channel_registered_method* rm;

  if (chand->registered_methods && calld->path_set && calld->host_set) {
    /* TODO(ctiller): unify these two searches */
    /* check for an exact match with host */
    hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash_internal(calld->host),
                              grpc_slice_hash_internal(calld->path));
    for (i = 0; i <= chand->registered_method_max_probes; i++) {
      rm = &chand->registered_methods[(hash + i) %
                                      chand->registered_method_slots];
      /* empty slot terminates the probe sequence */
      if (rm->server_registered_method == nullptr) break;
      if (!rm->has_host) continue;
      if (!grpc_slice_eq(rm->host, calld->host)) continue;
      if (!grpc_slice_eq(rm->method, calld->path)) continue;
      /* methods registered idempotent-only don't match non-idempotent calls */
      if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
          0 == (calld->recv_initial_metadata_flags &
                GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
        continue;
      }
      finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher,
                           rm->server_registered_method->payload_handling);
      return;
    }
    /* check for a wildcard method definition (no host set) */
    hash = GRPC_MDSTR_KV_HASH(0, grpc_slice_hash_internal(calld->path));
    for (i = 0; i <= chand->registered_method_max_probes; i++) {
      rm = &chand->registered_methods[(hash + i) %
                                      chand->registered_method_slots];
      if (rm->server_registered_method == nullptr) break;
      if (rm->has_host) continue;
      if (!grpc_slice_eq(rm->method, calld->path)) continue;
      if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
          0 == (calld->recv_initial_metadata_flags &
                GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
        continue;
      }
      finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher,
                           rm->server_registered_method->payload_handling);
      return;
    }
  }
  /* no registered method matched: use the catch-all matcher */
  finish_start_new_rpc(server, elem, &server->unregistered_request_matcher,
                       GRPC_SRM_PAYLOAD_NONE);
}
666
667 static int num_listeners(grpc_server* server) {
668   listener* l;
669   int n = 0;
670   for (l = server->listeners; l; l = l->next) {
671     n++;
672   }
673   return n;
674 }
675
676 static void done_shutdown_event(void* server, grpc_cq_completion* completion) {
677   server_unref(static_cast<grpc_server*>(server));
678 }
679
680 static int num_channels(grpc_server* server) {
681   channel_data* chand;
682   int n = 0;
683   for (chand = server->root_channel_data.next;
684        chand != &server->root_channel_data; chand = chand->next) {
685     n++;
686   }
687   return n;
688 }
689
690 static void kill_pending_work_locked(grpc_server* server, grpc_error* error) {
691   if (server->started) {
692     request_matcher_kill_requests(server, &server->unregistered_request_matcher,
693                                   GRPC_ERROR_REF(error));
694     request_matcher_zombify_all_pending_calls(
695         &server->unregistered_request_matcher);
696     for (registered_method* rm = server->registered_methods; rm;
697          rm = rm->next) {
698       request_matcher_kill_requests(server, &rm->matcher,
699                                     GRPC_ERROR_REF(error));
700       request_matcher_zombify_all_pending_calls(&rm->matcher);
701     }
702   }
703   GRPC_ERROR_UNREF(error);
704 }
705
/* If shutdown has been requested and all channels and listeners are gone,
   publish the shutdown tags to their completion queues (exactly once).
   Caller must hold mu_global and must NOT hold mu_call. */
static void maybe_finish_shutdown(grpc_server* server) {
  size_t i;
  if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
    return;
  }

  gpr_mu_lock(&server->mu_call);
  kill_pending_work_locked(
      server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
  gpr_mu_unlock(&server->mu_call);

  if (server->root_channel_data.next != &server->root_channel_data ||
      server->listeners_destroyed < num_listeners(server)) {
    /* still waiting: log progress at most once per second */
    if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
                                  server->last_shutdown_message_time),
                     gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
      gpr_log(GPR_DEBUG,
              "Waiting for %d channels and %d/%d listeners to be destroyed"
              " before shutting down server",
              num_channels(server),
              num_listeners(server) - server->listeners_destroyed,
              num_listeners(server));
    }
    return;
  }
  server->shutdown_published = 1;
  for (i = 0; i < server->num_shutdown_tags; i++) {
    /* each posted completion holds a server ref until done_shutdown_event */
    server_ref(server);
    grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
                   GRPC_ERROR_NONE, done_shutdown_event, server,
                   &server->shutdown_tags[i].completion);
  }
}
740
/* Intercepted recv_initial_metadata callback: extracts :path and :authority
   from the metadata batch, picks up the deadline, and forwards to the
   application's original callback.  Also releases a deferred
   recv_trailing_metadata_ready if one arrived first (see
   server_recv_trailing_metadata_ready). */
static void server_on_recv_initial_metadata(void* ptr, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(ptr);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  grpc_millis op_deadline;

  if (error == GRPC_ERROR_NONE) {
    GPR_DEBUG_ASSERT(calld->recv_initial_metadata->idx.named.path != nullptr);
    GPR_DEBUG_ASSERT(calld->recv_initial_metadata->idx.named.authority !=
                     nullptr);
    /* take our own refs on :path/:authority, then strip them from the batch
       so the application does not see them as ordinary metadata */
    calld->path = grpc_slice_ref_internal(
        GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md));
    calld->host = grpc_slice_ref_internal(
        GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.authority->md));
    calld->path_set = true;
    calld->host_set = true;
    grpc_metadata_batch_remove(calld->recv_initial_metadata, GRPC_BATCH_PATH);
    grpc_metadata_batch_remove(calld->recv_initial_metadata,
                               GRPC_BATCH_AUTHORITY);
  } else {
    GRPC_ERROR_REF(error);
  }
  op_deadline = calld->recv_initial_metadata->deadline;
  if (op_deadline != GRPC_MILLIS_INF_FUTURE) {
    calld->deadline = op_deadline;
  }
  if (calld->host_set && calld->path_set) {
    /* do nothing */
  } else {
    /* Pass the error reference to calld->recv_initial_metadata_error */
    grpc_error* src_error = error;
    error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
        "Missing :authority or :path", &src_error, 1);
    GRPC_ERROR_UNREF(src_error);
    calld->recv_initial_metadata_error = GRPC_ERROR_REF(error);
  }
  grpc_closure* closure = calld->on_done_recv_initial_metadata;
  /* clearing this signals server_recv_trailing_metadata_ready that it no
     longer needs to defer */
  calld->on_done_recv_initial_metadata = nullptr;
  if (calld->seen_recv_trailing_metadata_ready) {
    GRPC_CALL_COMBINER_START(calld->call_combiner,
                             &calld->recv_trailing_metadata_ready,
                             calld->recv_trailing_metadata_error,
                             "continue server_recv_trailing_metadata_ready");
  }
  GRPC_CLOSURE_RUN(closure, error);
}
786
/* Intercepted recv_trailing_metadata_ready callback.  If initial metadata
   has not been processed yet, defer (re-arm the closure and yield the call
   combiner); server_on_recv_initial_metadata will restart us.  Otherwise,
   attach any initial-metadata error as a child and forward to the original
   callback. */
static void server_recv_trailing_metadata_ready(void* user_data,
                                                grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (calld->on_done_recv_initial_metadata != nullptr) {
    /* initial metadata still pending: stash our error and wait */
    calld->recv_trailing_metadata_error = GRPC_ERROR_REF(error);
    calld->seen_recv_trailing_metadata_ready = true;
    GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
                      server_recv_trailing_metadata_ready, elem,
                      grpc_schedule_on_exec_ctx);
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "deferring server_recv_trailing_metadata_ready "
                            "until after server_on_recv_initial_metadata");
    return;
  }
  error =
      grpc_error_add_child(GRPC_ERROR_REF(error),
                           GRPC_ERROR_REF(calld->recv_initial_metadata_error));
  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, error);
}
807
808 static void server_mutate_op(grpc_call_element* elem,
809                              grpc_transport_stream_op_batch* op) {
810   call_data* calld = static_cast<call_data*>(elem->call_data);
811
812   if (op->recv_initial_metadata) {
813     GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == nullptr);
814     calld->recv_initial_metadata =
815         op->payload->recv_initial_metadata.recv_initial_metadata;
816     calld->on_done_recv_initial_metadata =
817         op->payload->recv_initial_metadata.recv_initial_metadata_ready;
818     op->payload->recv_initial_metadata.recv_initial_metadata_ready =
819         &calld->server_on_recv_initial_metadata;
820     op->payload->recv_initial_metadata.recv_flags =
821         &calld->recv_initial_metadata_flags;
822   }
823   if (op->recv_trailing_metadata) {
824     calld->original_recv_trailing_metadata_ready =
825         op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
826     op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
827         &calld->recv_trailing_metadata_ready;
828   }
829 }
830
831 static void server_start_transport_stream_op_batch(
832     grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
833   server_mutate_op(elem, op);
834   grpc_call_next_op(elem, op);
835 }
836
/* Closure run once the initial metadata of a newly accepted stream has been
   received (or its batch failed). On success the RPC is routed to a matcher;
   on failure the call is marked ZOMBIED so it gets destroyed. */
static void got_initial_metadata(void* ptr, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(ptr);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (error == GRPC_ERROR_NONE) {
    start_new_rpc(elem);
  } else {
    /* NOT_STARTED -> ZOMBIED: the call never reached a pending queue, so it
       can be killed immediately. */
    if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) {
      GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
                        grpc_schedule_on_exec_ctx);
      GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
    } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) {
      /* zombied call will be destroyed when it's removed from the pending
         queue... later */
    }
  }
}
853
854 static void accept_stream(void* cd, grpc_transport* transport,
855                           const void* transport_server_data) {
856   channel_data* chand = static_cast<channel_data*>(cd);
857   /* create a call */
858   grpc_call_create_args args;
859   args.channel = chand->channel;
860   args.server = chand->server;
861   args.parent = nullptr;
862   args.propagation_mask = 0;
863   args.cq = nullptr;
864   args.pollset_set_alternative = nullptr;
865   args.server_transport_data = transport_server_data;
866   args.add_initial_metadata = nullptr;
867   args.add_initial_metadata_count = 0;
868   args.send_deadline = GRPC_MILLIS_INF_FUTURE;
869   grpc_call* call;
870   grpc_error* error = grpc_call_create(&args, &call);
871   grpc_call_element* elem =
872       grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
873   if (error != GRPC_ERROR_NONE) {
874     got_initial_metadata(elem, error);
875     GRPC_ERROR_UNREF(error);
876     return;
877   }
878   call_data* calld = static_cast<call_data*>(elem->call_data);
879   grpc_op op;
880   op.op = GRPC_OP_RECV_INITIAL_METADATA;
881   op.flags = 0;
882   op.reserved = nullptr;
883   op.data.recv_initial_metadata.recv_initial_metadata =
884       &calld->initial_metadata;
885   GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
886                     grpc_schedule_on_exec_ctx);
887   grpc_call_start_batch_and_execute(call, &op, 1, &calld->got_initial_metadata);
888 }
889
/* Connectivity watcher for a server channel: while the channel has not
   reached SHUTDOWN, re-arm the watch for the next state change; once it is
   SHUTDOWN, destroy the channel under mu_global and drop the "connectivity"
   ref taken in grpc_server_setup_transport. */
static void channel_connectivity_changed(void* cd, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(cd);
  grpc_server* server = chand->server;
  if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
    /* Not terminal yet: issue another watch op down the channel stack. */
    grpc_transport_op* op = grpc_make_transport_op(nullptr);
    op->on_connectivity_state_change = &chand->channel_connectivity_changed;
    op->connectivity_state = &chand->connectivity_state;
    grpc_channel_next_op(grpc_channel_stack_element(
                             grpc_channel_get_channel_stack(chand->channel), 0),
                         op);
  } else {
    gpr_mu_lock(&server->mu_global);
    destroy_channel(chand, GRPC_ERROR_REF(error));
    gpr_mu_unlock(&server->mu_global);
    GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "connectivity");
  }
}
907
908 static grpc_error* init_call_elem(grpc_call_element* elem,
909                                   const grpc_call_element_args* args) {
910   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
911   server_ref(chand->server);
912   new (elem->call_data) call_data(elem, *args);
913   return GRPC_ERROR_NONE;
914 }
915
916 static void destroy_call_elem(grpc_call_element* elem,
917                               const grpc_call_final_info* final_info,
918                               grpc_closure* ignored) {
919   call_data* calld = static_cast<call_data*>(elem->call_data);
920   calld->~call_data();
921   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
922   server_unref(chand->server);
923 }
924
925 static grpc_error* init_channel_elem(grpc_channel_element* elem,
926                                      grpc_channel_element_args* args) {
927   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
928   GPR_ASSERT(args->is_first);
929   GPR_ASSERT(!args->is_last);
930   chand->server = nullptr;
931   chand->channel = nullptr;
932   chand->next = chand->prev = chand;
933   chand->registered_methods = nullptr;
934   chand->connectivity_state = GRPC_CHANNEL_IDLE;
935   GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed,
936                     channel_connectivity_changed, chand,
937                     grpc_schedule_on_exec_ctx);
938   return GRPC_ERROR_NONE;
939 }
940
/* Channel filter teardown: free the registered-method lookup table (with its
   slice refs), and if the channel was bound to a server, unlink it from the
   server's channel list under mu_global and drop the server ref. */
static void destroy_channel_elem(grpc_channel_element* elem) {
  size_t i;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  if (chand->registered_methods) {
    for (i = 0; i < chand->registered_method_slots; i++) {
      grpc_slice_unref_internal(chand->registered_methods[i].method);
      if (chand->registered_methods[i].has_host) {
        grpc_slice_unref_internal(chand->registered_methods[i].host);
      }
    }
    gpr_free(chand->registered_methods);
  }
  if (chand->server) {
    if (chand->server->channelz_server != nullptr &&
        chand->channelz_socket_uuid != 0) {
      chand->server->channelz_server->RemoveChildSocket(
          chand->channelz_socket_uuid);
    }
    gpr_mu_lock(&chand->server->mu_global);
    /* Unlink from the doubly-linked channel list and return this node to its
       self-linked (freshly-initialized) state. */
    chand->next->prev = chand->prev;
    chand->prev->next = chand->next;
    chand->next = chand->prev = chand;
    /* Removing a channel may be the last event blocking server shutdown. */
    maybe_finish_shutdown(chand->server);
    gpr_mu_unlock(&chand->server->mu_global);
    server_unref(chand->server);
  }
}
968
/* vtable for the server's top channel filter — the first element in every
   server channel stack (see init_channel_elem's is_first assertion). */
const grpc_channel_filter grpc_server_top_filter = {
    server_start_transport_stream_op_batch, /* per-stream batch entry point */
    grpc_channel_next_op,                   /* transport ops: pass through */
    sizeof(call_data),
    init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    destroy_call_elem,
    sizeof(channel_data),
    init_channel_elem,
    destroy_channel_elem,
    grpc_channel_next_get_info, /* channel info: pass through */
    "server",                   /* filter name */
};
982
983 static void register_completion_queue(grpc_server* server,
984                                       grpc_completion_queue* cq,
985                                       void* reserved) {
986   size_t i, n;
987   GPR_ASSERT(!reserved);
988   for (i = 0; i < server->cq_count; i++) {
989     if (server->cqs[i] == cq) return;
990   }
991
992   GRPC_CQ_INTERNAL_REF(cq, "server");
993   n = server->cq_count++;
994   server->cqs = static_cast<grpc_completion_queue**>(gpr_realloc(
995       server->cqs, server->cq_count * sizeof(grpc_completion_queue*)));
996   server->cqs[n] = cq;
997 }
998
999 void grpc_server_register_completion_queue(grpc_server* server,
1000                                            grpc_completion_queue* cq,
1001                                            void* reserved) {
1002   GRPC_API_TRACE(
1003       "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
1004       (server, cq, reserved));
1005
1006   auto cq_type = grpc_get_cq_completion_type(cq);
1007   if (cq_type != GRPC_CQ_NEXT && cq_type != GRPC_CQ_CALLBACK) {
1008     gpr_log(GPR_INFO,
1009             "Completion queue of type %d is being registered as a "
1010             "server-completion-queue",
1011             static_cast<int>(cq_type));
1012     /* Ideally we should log an error and abort but ruby-wrapped-language API
1013        calls grpc_completion_queue_pluck() on server completion queues */
1014   }
1015
1016   register_completion_queue(server, cq, reserved);
1017 }
1018
/* Public API: allocate and initialize a server. Copies the channel args,
   optionally creates a channelz node and a default resource user. The
   initial internal ref is dropped by grpc_server_destroy. */
grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));

  /* gpr_zalloc: all fields start zeroed; only non-zero state is set below. */
  grpc_server* server =
      static_cast<grpc_server*>(gpr_zalloc(sizeof(grpc_server)));

  gpr_mu_init(&server->mu_global);
  gpr_mu_init(&server->mu_call);
  gpr_cv_init(&server->starting_cv);

  /* decremented by grpc_server_destroy */
  new (&server->internal_refcount) grpc_core::RefCount();
  /* Empty channel list: sentinel node linked to itself. */
  server->root_channel_data.next = server->root_channel_data.prev =
      &server->root_channel_data;

  server->channel_args = grpc_channel_args_copy(args);

  /* Channelz is opt-in via channel args. */
  const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_ENABLE_CHANNELZ);
  if (grpc_channel_arg_get_bool(arg, GRPC_ENABLE_CHANNELZ_DEFAULT)) {
    arg = grpc_channel_args_find(
        args, GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE);
    size_t channel_tracer_max_memory = grpc_channel_arg_get_integer(
        arg,
        {GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX});
    server->channelz_server =
        grpc_core::MakeRefCounted<grpc_core::channelz::ServerNode>(
            server, channel_tracer_max_memory);
    server->channelz_server->AddTraceEvent(
        grpc_core::channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string("Server created"));
  }

  /* If the args carry a resource quota, derive the server's default resource
     user from it (false => do not create a quota if absent). */
  if (args != nullptr) {
    grpc_resource_quota* resource_quota =
        grpc_resource_quota_from_channel_args(args, false /* create */);
    if (resource_quota != nullptr) {
      server->default_resource_user =
          grpc_resource_user_create(resource_quota, "default");
    }
  }

  return server;
}
1063
/* Null-tolerant string equality: two null pointers compare equal, a null
   pointer never matches a non-null string, otherwise contents are compared. */
static int streq(const char* a, const char* b) {
  if (a == nullptr || b == nullptr) return a == b;
  return strcmp(a, b) == 0;
}
1070
1071 void* grpc_server_register_method(
1072     grpc_server* server, const char* method, const char* host,
1073     grpc_server_register_method_payload_handling payload_handling,
1074     uint32_t flags) {
1075   registered_method* m;
1076   GRPC_API_TRACE(
1077       "grpc_server_register_method(server=%p, method=%s, host=%s, "
1078       "flags=0x%08x)",
1079       4, (server, method, host, flags));
1080   if (!method) {
1081     gpr_log(GPR_ERROR,
1082             "grpc_server_register_method method string cannot be NULL");
1083     return nullptr;
1084   }
1085   for (m = server->registered_methods; m; m = m->next) {
1086     if (streq(m->method, method) && streq(m->host, host)) {
1087       gpr_log(GPR_ERROR, "duplicate registration for %s@%s", method,
1088               host ? host : "*");
1089       return nullptr;
1090     }
1091   }
1092   if ((flags & ~GRPC_INITIAL_METADATA_USED_MASK) != 0) {
1093     gpr_log(GPR_ERROR, "grpc_server_register_method invalid flags 0x%08x",
1094             flags);
1095     return nullptr;
1096   }
1097   m = static_cast<registered_method*>(gpr_zalloc(sizeof(registered_method)));
1098   m->method = gpr_strdup(method);
1099   m->host = gpr_strdup(host);
1100   m->next = server->registered_methods;
1101   m->payload_handling = payload_handling;
1102   m->flags = flags;
1103   server->registered_methods = m;
1104   return m;
1105 }
1106
/* Public API: start the server. Collects the pollsets of all listening CQs,
   initializes request matchers, and starts each listener. The starting flag
   plus starting_cv let grpc_server_shutdown_and_notify wait for startup to
   finish before shutting down. */
void grpc_server_start(grpc_server* server) {
  size_t i;
  grpc_core::ExecCtx exec_ctx;

  GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));

  server->started = true;
  server->pollset_count = 0;
  /* Allocate for the worst case (every CQ can listen); only listening CQs'
     pollsets are actually stored. */
  server->pollsets = static_cast<grpc_pollset**>(
      gpr_malloc(sizeof(grpc_pollset*) * server->cq_count));
  for (i = 0; i < server->cq_count; i++) {
    if (grpc_cq_can_listen(server->cqs[i])) {
      server->pollsets[server->pollset_count++] =
          grpc_cq_pollset(server->cqs[i]);
    }
  }
  /* One matcher for unregistered calls, one per registered method. */
  request_matcher_init(&server->unregistered_request_matcher, server);
  for (registered_method* rm = server->registered_methods; rm; rm = rm->next) {
    request_matcher_init(&rm->matcher, server);
  }

  gpr_mu_lock(&server->mu_global);
  server->starting = true;
  gpr_mu_unlock(&server->mu_global);

  /* Listener start callbacks run outside the lock. */
  for (listener* l = server->listeners; l; l = l->next) {
    l->start(server, l->arg, server->pollsets, server->pollset_count);
  }

  gpr_mu_lock(&server->mu_global);
  server->starting = false;
  gpr_cv_signal(&server->starting_cv);
  gpr_mu_unlock(&server->mu_global);
}
1141
1142 void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
1143                               size_t* pollset_count) {
1144   *pollset_count = server->pollset_count;
1145   *pollsets = server->pollsets;
1146 }
1147
/* Wires a freshly accepted transport into the server: creates the server
   channel, binds it to a completion queue, builds a per-channel hash table
   of registered methods, links the channel into the server's list, and
   starts accepting streams / watching connectivity. */
void grpc_server_setup_transport(
    grpc_server* s, grpc_transport* transport, grpc_pollset* accepting_pollset,
    const grpc_channel_args* args,
    const grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>&
        socket_node,
    grpc_resource_user* resource_user) {
  size_t num_registered_methods;
  size_t alloc;
  registered_method* rm;
  channel_registered_method* crm;
  grpc_channel* channel;
  channel_data* chand;
  uint32_t hash;
  size_t slots;
  uint32_t probes;
  uint32_t max_probes = 0;
  grpc_transport_op* op = nullptr;

  channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport,
                                resource_user);
  /* The server filter is element 0 of the stack (see init_channel_elem). */
  chand = static_cast<channel_data*>(
      grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0)
          ->channel_data);
  chand->server = s;
  server_ref(s);
  chand->channel = channel;
  if (socket_node != nullptr) {
    chand->channelz_socket_uuid = socket_node->uuid();
    s->channelz_server->AddChildSocket(socket_node);
  } else {
    chand->channelz_socket_uuid = 0;
  }

  /* Prefer the CQ whose pollset accepted the connection, so completions stay
     on the same polling thread when possible. */
  size_t cq_idx;
  for (cq_idx = 0; cq_idx < s->cq_count; cq_idx++) {
    if (grpc_cq_pollset(s->cqs[cq_idx]) == accepting_pollset) break;
  }
  if (cq_idx == s->cq_count) {
    /* completion queue not found: pick a random one to publish new calls to */
    cq_idx = static_cast<size_t>(rand()) % s->cq_count;
  }
  chand->cq_idx = cq_idx;

  num_registered_methods = 0;
  for (rm = s->registered_methods; rm; rm = rm->next) {
    num_registered_methods++;
  }
  /* build a lookup table phrased in terms of mdstr's in this channels context
     to quickly find registered methods */
  if (num_registered_methods > 0) {
    /* Open-addressed table at 50% load factor, linear probing. */
    slots = 2 * num_registered_methods;
    alloc = sizeof(channel_registered_method) * slots;
    chand->registered_methods =
        static_cast<channel_registered_method*>(gpr_zalloc(alloc));
    for (rm = s->registered_methods; rm; rm = rm->next) {
      grpc_slice host;
      bool has_host;
      grpc_slice method;
      if (rm->host != nullptr) {
        host = grpc_slice_from_static_string(rm->host);
        has_host = true;
      } else {
        has_host = false;
      }
      method = grpc_slice_from_static_string(rm->method);
      hash = GRPC_MDSTR_KV_HASH(has_host ? grpc_slice_hash_internal(host) : 0,
                                grpc_slice_hash_internal(method));
      /* Linear-probe to the first empty slot. */
      for (probes = 0; chand->registered_methods[(hash + probes) % slots]
                           .server_registered_method != nullptr;
           probes++)
        ;
      if (probes > max_probes) max_probes = probes;
      crm = &chand->registered_methods[(hash + probes) % slots];
      crm->server_registered_method = rm;
      crm->flags = rm->flags;
      crm->has_host = has_host;
      if (has_host) {
        crm->host = host;
      }
      crm->method = method;
    }
    GPR_ASSERT(slots <= UINT32_MAX);
    chand->registered_method_slots = static_cast<uint32_t>(slots);
    chand->registered_method_max_probes = max_probes;
  }

  /* Insert at the head of the server's channel list (after the sentinel). */
  gpr_mu_lock(&s->mu_global);
  chand->next = &s->root_channel_data;
  chand->prev = chand->next->prev;
  chand->next->prev = chand->prev->next = chand;
  gpr_mu_unlock(&s->mu_global);

  /* Ref dropped by channel_connectivity_changed once SHUTDOWN is reached. */
  GRPC_CHANNEL_INTERNAL_REF(channel, "connectivity");
  op = grpc_make_transport_op(nullptr);
  op->set_accept_stream = true;
  op->set_accept_stream_fn = accept_stream;
  op->set_accept_stream_user_data = chand;
  op->on_connectivity_state_change = &chand->channel_connectivity_changed;
  op->connectivity_state = &chand->connectivity_state;
  /* If shutdown raced ahead of this setup, disconnect immediately. */
  if (gpr_atm_acq_load(&s->shutdown_flag) != 0) {
    op->disconnect_with_error =
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown");
  }
  grpc_transport_perform_op(transport, op);
}
1253
1254 void done_published_shutdown(void* done_arg, grpc_cq_completion* storage) {
1255   (void)done_arg;
1256   gpr_free(storage);
1257 }
1258
1259 static void listener_destroy_done(void* s, grpc_error* error) {
1260   grpc_server* server = static_cast<grpc_server*>(s);
1261   gpr_mu_lock(&server->mu_global);
1262   server->listeners_destroyed++;
1263   maybe_finish_shutdown(server);
1264   gpr_mu_unlock(&server->mu_global);
1265 }
1266
1267 /*
1268   - Kills all pending requests-for-incoming-RPC-calls (i.e the requests made via
1269     grpc_server_request_call and grpc_server_request_registered call will now be
1270     cancelled). See 'kill_pending_work_locked()'
1271
1272   - Shuts down the listeners (i.e the server will no longer listen on the port
1273     for new incoming channels).
1274
1275   - Iterates through all channels on the server and sends shutdown msg (see
1276     'channel_broadcaster_shutdown()' for details) to the clients via the
1277     transport layer. The transport layer then guarantees the following:
1278      -- Sends shutdown to the client (for eg: HTTP2 transport sends GOAWAY)
1279      -- If the server has outstanding calls that are in the process, the
1280         connection is NOT closed until the server is done with all those calls
1281      -- Once, there are no more calls in progress, the channel is closed
1282  */
/* Public API: begin server shutdown and arrange for 'tag' to be delivered on
   'cq' once shutdown completes. See the block comment above for the overall
   sequence. Safe to call multiple times: each call adds a shutdown tag. */
void grpc_server_shutdown_and_notify(grpc_server* server,
                                     grpc_completion_queue* cq, void* tag) {
  listener* l;
  shutdown_tag* sdt;
  channel_broadcaster broadcaster;
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;

  GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
                 (server, cq, tag));

  /* wait for startup to be finished: locks mu_global */
  gpr_mu_lock(&server->mu_global);
  while (server->starting) {
    gpr_cv_wait(&server->starting_cv, &server->mu_global,
                gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }

  /* stay locked, and gather up some stuff to do */
  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
  if (server->shutdown_published) {
    /* Shutdown already finished earlier: complete this tag immediately. The
       completion storage is freed by done_published_shutdown. */
    grpc_cq_end_op(cq, tag, GRPC_ERROR_NONE, done_published_shutdown, nullptr,
                   static_cast<grpc_cq_completion*>(
                       gpr_malloc(sizeof(grpc_cq_completion))));
    gpr_mu_unlock(&server->mu_global);
    return;
  }
  /* Record the (cq, tag) pair to notify when shutdown finally completes. */
  server->shutdown_tags = static_cast<shutdown_tag*>(
      gpr_realloc(server->shutdown_tags,
                  sizeof(shutdown_tag) * (server->num_shutdown_tags + 1)));
  sdt = &server->shutdown_tags[server->num_shutdown_tags++];
  sdt->tag = tag;
  sdt->cq = cq;
  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    /* Another call already started shutdown; the recorded tag suffices. */
    gpr_mu_unlock(&server->mu_global);
    return;
  }

  server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);

  /* Snapshot current channels for broadcast once locks are released. */
  channel_broadcaster_init(server, &broadcaster);

  gpr_atm_rel_store(&server->shutdown_flag, 1);

  /* collect all unregistered then registered calls */
  gpr_mu_lock(&server->mu_call);
  kill_pending_work_locked(
      server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
  gpr_mu_unlock(&server->mu_call);

  maybe_finish_shutdown(server);
  gpr_mu_unlock(&server->mu_global);

  /* Shutdown listeners */
  for (l = server->listeners; l; l = l->next) {
    GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server,
                      grpc_schedule_on_exec_ctx);
    l->destroy(server, l->arg, &l->destroy_done);
    if (server->channelz_server != nullptr && l->socket_uuid != 0) {
      server->channelz_server->RemoveChildListenSocket(l->socket_uuid);
    }
  }

  /* Broadcast GOAWAY to all channels; in-progress calls are allowed to
     finish before the transports close. */
  channel_broadcaster_shutdown(&broadcaster, true /* send_goaway */,
                               GRPC_ERROR_NONE);

  /* Release the default resource user and its quota, if one was created. */
  if (server->default_resource_user != nullptr) {
    grpc_resource_quota_unref(
        grpc_resource_user_quota(server->default_resource_user));
    grpc_resource_user_shutdown(server->default_resource_user);
    grpc_resource_user_unref(server->default_resource_user);
  }
}
1356
1357 void grpc_server_cancel_all_calls(grpc_server* server) {
1358   channel_broadcaster broadcaster;
1359   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1360   grpc_core::ExecCtx exec_ctx;
1361
1362   GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
1363
1364   gpr_mu_lock(&server->mu_global);
1365   channel_broadcaster_init(server, &broadcaster);
1366   gpr_mu_unlock(&server->mu_global);
1367
1368   channel_broadcaster_shutdown(
1369       &broadcaster, false /* send_goaway */,
1370       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls"));
1371 }
1372
1373 void grpc_server_destroy(grpc_server* server) {
1374   listener* l;
1375   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1376   grpc_core::ExecCtx exec_ctx;
1377
1378   GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
1379
1380   gpr_mu_lock(&server->mu_global);
1381   GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
1382   GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
1383
1384   while (server->listeners) {
1385     l = server->listeners;
1386     server->listeners = l->next;
1387     gpr_free(l);
1388   }
1389
1390   gpr_mu_unlock(&server->mu_global);
1391
1392   server_unref(server);
1393 }
1394
1395 void grpc_server_add_listener(
1396     grpc_server* server, void* listener_arg,
1397     void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
1398                   size_t pollset_count),
1399     void (*destroy)(grpc_server* server, void* arg, grpc_closure* on_done),
1400     grpc_core::RefCountedPtr<grpc_core::channelz::ListenSocketNode> node) {
1401   listener* l = static_cast<listener*>(gpr_malloc(sizeof(listener)));
1402   l->arg = listener_arg;
1403   l->start = start;
1404   l->destroy = destroy;
1405   l->socket_uuid = 0;
1406   if (node != nullptr) {
1407     l->socket_uuid = node->uuid();
1408     if (server->channelz_server != nullptr) {
1409       server->channelz_server->AddChildListenSocket(std::move(node));
1410     }
1411   }
1412   l->next = server->listeners;
1413   server->listeners = l;
1414 }
1415
/* Enqueue an application request-for-call on the matcher for the given CQ.
   If this request is the first in its queue, drain the matcher's pending
   calls by pairing each with a queued request. Fails the request outright if
   the server is already shutting down. */
static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
                                          requested_call* rc) {
  call_data* calld = nullptr;
  request_matcher* rm = nullptr;
  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    fail_call(server, cq_idx, rc,
              GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
    return GRPC_CALL_OK;
  }
  /* Pick the matcher: one shared matcher for unregistered calls, or the
     per-method matcher for registered calls. */
  switch (rc->type) {
    case BATCH_CALL:
      rm = &server->unregistered_request_matcher;
      break;
    case REGISTERED_CALL:
      rm = &rc->data.registered.method->matcher;
      break;
  }
  if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
    /* this was the first queued request: we need to lock and start
       matching calls */
    gpr_mu_lock(&server->mu_call);
    while ((calld = rm->pending_head) != nullptr) {
      rc = reinterpret_cast<requested_call*>(
          gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
      if (rc == nullptr) break;
      rm->pending_head = calld->pending_next;
      /* Drop mu_call while publishing/zombifying; re-taken before the next
         iteration examines pending_head again. */
      gpr_mu_unlock(&server->mu_call);
      if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
        // Zombied Call
        GRPC_CLOSURE_INIT(
            &calld->kill_zombie_closure, kill_zombie,
            grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
            grpc_schedule_on_exec_ctx);
        GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
      } else {
        publish_call(server, calld, cq_idx, rc);
      }
      gpr_mu_lock(&server->mu_call);
    }
    gpr_mu_unlock(&server->mu_call);
  }
  return GRPC_CALL_OK;
}
1459
1460 grpc_call_error grpc_server_request_call(
1461     grpc_server* server, grpc_call** call, grpc_call_details* details,
1462     grpc_metadata_array* initial_metadata,
1463     grpc_completion_queue* cq_bound_to_call,
1464     grpc_completion_queue* cq_for_notification, void* tag) {
1465   grpc_call_error error;
1466   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1467   grpc_core::ExecCtx exec_ctx;
1468   requested_call* rc = static_cast<requested_call*>(gpr_malloc(sizeof(*rc)));
1469   GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
1470   GRPC_API_TRACE(
1471       "grpc_server_request_call("
1472       "server=%p, call=%p, details=%p, initial_metadata=%p, "
1473       "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
1474       7,
1475       (server, call, details, initial_metadata, cq_bound_to_call,
1476        cq_for_notification, tag));
1477   size_t cq_idx;
1478   for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
1479     if (server->cqs[cq_idx] == cq_for_notification) {
1480       break;
1481     }
1482   }
1483   if (cq_idx == server->cq_count) {
1484     gpr_free(rc);
1485     error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
1486     goto done;
1487   }
1488   if (grpc_cq_begin_op(cq_for_notification, tag) == false) {
1489     gpr_free(rc);
1490     error = GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
1491     goto done;
1492   }
1493   details->reserved = nullptr;
1494   rc->cq_idx = cq_idx;
1495   rc->type = BATCH_CALL;
1496   rc->server = server;
1497   rc->tag = tag;
1498   rc->cq_bound_to_call = cq_bound_to_call;
1499   rc->call = call;
1500   rc->data.batch.details = details;
1501   rc->initial_metadata = initial_metadata;
1502   error = queue_call_request(server, cq_idx, rc);
1503 done:
1504
1505   return error;
1506 }
1507
1508 grpc_call_error grpc_server_request_registered_call(
1509     grpc_server* server, void* rmp, grpc_call** call, gpr_timespec* deadline,
1510     grpc_metadata_array* initial_metadata, grpc_byte_buffer** optional_payload,
1511     grpc_completion_queue* cq_bound_to_call,
1512     grpc_completion_queue* cq_for_notification, void* tag) {
1513   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1514   grpc_core::ExecCtx exec_ctx;
1515   GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
1516   requested_call* rc = static_cast<requested_call*>(gpr_malloc(sizeof(*rc)));
1517   registered_method* rm = static_cast<registered_method*>(rmp);
1518   GRPC_API_TRACE(
1519       "grpc_server_request_registered_call("
1520       "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
1521       "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
1522       "tag=%p)",
1523       9,
1524       (server, rmp, call, deadline, initial_metadata, optional_payload,
1525        cq_bound_to_call, cq_for_notification, tag));
1526
1527   size_t cq_idx;
1528   for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
1529     if (server->cqs[cq_idx] == cq_for_notification) {
1530       break;
1531     }
1532   }
1533   if (cq_idx == server->cq_count) {
1534     gpr_free(rc);
1535     return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
1536   }
1537   if ((optional_payload == nullptr) !=
1538       (rm->payload_handling == GRPC_SRM_PAYLOAD_NONE)) {
1539     gpr_free(rc);
1540     return GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH;
1541   }
1542
1543   if (grpc_cq_begin_op(cq_for_notification, tag) == false) {
1544     gpr_free(rc);
1545     return GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
1546   }
1547   rc->cq_idx = cq_idx;
1548   rc->type = REGISTERED_CALL;
1549   rc->server = server;
1550   rc->tag = tag;
1551   rc->cq_bound_to_call = cq_bound_to_call;
1552   rc->call = call;
1553   rc->data.registered.method = rm;
1554   rc->data.registered.deadline = deadline;
1555   rc->initial_metadata = initial_metadata;
1556   rc->data.registered.optional_payload = optional_payload;
1557   return queue_call_request(server, cq_idx, rc);
1558 }
1559
1560 static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,
1561                       grpc_error* error) {
1562   *rc->call = nullptr;
1563   rc->initial_metadata->count = 0;
1564   GPR_ASSERT(error != GRPC_ERROR_NONE);
1565
1566   grpc_cq_end_op(server->cqs[cq_idx], rc->tag, error, done_request_event, rc,
1567                  &rc->completion);
1568 }
1569
1570 const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) {
1571   return server->channel_args;
1572 }
1573
1574 grpc_resource_user* grpc_server_get_default_resource_user(grpc_server* server) {
1575   return server->default_resource_user;
1576 }
1577
1578 int grpc_server_has_open_connections(grpc_server* server) {
1579   int r;
1580   gpr_mu_lock(&server->mu_global);
1581   r = server->root_channel_data.next != &server->root_channel_data;
1582   gpr_mu_unlock(&server->mu_global);
1583   return r;
1584 }
1585
1586 grpc_core::channelz::ServerNode* grpc_server_get_channelz_node(
1587     grpc_server* server) {
1588   if (server == nullptr) {
1589     return nullptr;
1590   }
1591   return server->channelz_server.get();
1592 }