/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
19 #include <grpc/support/port_platform.h>
21 #include "src/core/lib/transport/metadata_batch.h"
26 #include <grpc/support/alloc.h>
27 #include <grpc/support/log.h>
29 #include "src/core/lib/profiling/timers.h"
30 #include "src/core/lib/slice/slice_internal.h"
31 #include "src/core/lib/slice/slice_string_helpers.h"
33 static void assert_valid_list(grpc_mdelem_list* list) {
35 grpc_linked_mdelem* l;
37 GPR_ASSERT((list->head == nullptr) == (list->tail == nullptr));
38 if (!list->head) return;
39 GPR_ASSERT(list->head->prev == nullptr);
40 GPR_ASSERT(list->tail->next == nullptr);
41 GPR_ASSERT((list->head == list->tail) == (list->head->next == nullptr));
43 size_t verified_count = 0;
44 for (l = list->head; l; l = l->next) {
45 GPR_ASSERT(!GRPC_MDISNULL(l->md));
46 GPR_ASSERT((l->prev == nullptr) == (l == list->head));
47 GPR_ASSERT((l->next == nullptr) == (l == list->tail));
48 if (l->next) GPR_ASSERT(l->next->prev == l);
49 if (l->prev) GPR_ASSERT(l->prev->next == l);
52 GPR_ASSERT(list->count == verified_count);
56 static void assert_valid_callouts(grpc_metadata_batch* batch) {
58 for (grpc_linked_mdelem* l = batch->list.head; l != nullptr; l = l->next) {
59 grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md));
60 grpc_metadata_batch_callouts_index callout_idx =
61 GRPC_BATCH_INDEX_OF(key_interned);
62 if (callout_idx != GRPC_BATCH_CALLOUTS_COUNT) {
63 GPR_ASSERT(batch->idx.array[callout_idx] == l);
65 grpc_slice_unref_internal(key_interned);
71 void grpc_metadata_batch_assert_ok(grpc_metadata_batch* batch) {
72 assert_valid_list(&batch->list);
76 void grpc_metadata_batch_init(grpc_metadata_batch* batch) {
77 memset(batch, 0, sizeof(*batch));
78 batch->deadline = GRPC_MILLIS_INF_FUTURE;
81 void grpc_metadata_batch_destroy(grpc_metadata_batch* batch) {
82 grpc_linked_mdelem* l;
83 for (l = batch->list.head; l; l = l->next) {
84 GRPC_MDELEM_UNREF(l->md);
88 grpc_error* grpc_attach_md_to_error(grpc_error* src, grpc_mdelem md) {
89 grpc_error* out = grpc_error_set_str(
90 grpc_error_set_str(src, GRPC_ERROR_STR_KEY,
91 grpc_slice_ref_internal(GRPC_MDKEY(md))),
92 GRPC_ERROR_STR_VALUE, grpc_slice_ref_internal(GRPC_MDVALUE(md)));
96 static grpc_error* GPR_ATTRIBUTE_NOINLINE error_with_md(grpc_mdelem md) {
97 return grpc_attach_md_to_error(
98 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unallowed duplicate metadata"), md);
101 static grpc_error* link_callout(grpc_metadata_batch* batch,
102 grpc_linked_mdelem* storage,
103 grpc_metadata_batch_callouts_index idx) {
104 GPR_DEBUG_ASSERT(idx >= 0 && idx < GRPC_BATCH_CALLOUTS_COUNT);
105 if (GPR_LIKELY(batch->idx.array[idx] == nullptr)) {
106 ++batch->list.default_count;
107 batch->idx.array[idx] = storage;
108 return GRPC_ERROR_NONE;
110 return error_with_md(storage->md);
113 static grpc_error* maybe_link_callout(grpc_metadata_batch* batch,
114 grpc_linked_mdelem* storage)
115 GRPC_MUST_USE_RESULT;
117 static grpc_error* maybe_link_callout(grpc_metadata_batch* batch,
118 grpc_linked_mdelem* storage) {
119 grpc_metadata_batch_callouts_index idx =
120 GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md));
121 if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
122 return GRPC_ERROR_NONE;
124 return link_callout(batch, storage, idx);
127 static void maybe_unlink_callout(grpc_metadata_batch* batch,
128 grpc_linked_mdelem* storage) {
129 grpc_metadata_batch_callouts_index idx =
130 GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md));
131 if (idx == GRPC_BATCH_CALLOUTS_COUNT) {
134 --batch->list.default_count;
135 GPR_DEBUG_ASSERT(batch->idx.array[idx] != nullptr);
136 batch->idx.array[idx] = nullptr;
139 grpc_error* grpc_metadata_batch_add_head(grpc_metadata_batch* batch,
140 grpc_linked_mdelem* storage,
141 grpc_mdelem elem_to_add) {
142 GPR_DEBUG_ASSERT(!GRPC_MDISNULL(elem_to_add));
143 storage->md = elem_to_add;
144 return grpc_metadata_batch_link_head(batch, storage);
147 static void link_head(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
148 assert_valid_list(list);
149 GPR_DEBUG_ASSERT(!GRPC_MDISNULL(storage->md));
150 storage->prev = nullptr;
151 storage->next = list->head;
152 storage->reserved = nullptr;
153 if (list->head != nullptr) {
154 list->head->prev = storage;
156 list->tail = storage;
158 list->head = storage;
160 assert_valid_list(list);
163 grpc_error* grpc_metadata_batch_link_head(grpc_metadata_batch* batch,
164 grpc_linked_mdelem* storage) {
165 assert_valid_callouts(batch);
166 grpc_error* err = maybe_link_callout(batch, storage);
167 if (err != GRPC_ERROR_NONE) {
168 assert_valid_callouts(batch);
171 link_head(&batch->list, storage);
172 assert_valid_callouts(batch);
173 return GRPC_ERROR_NONE;
176 // TODO(arjunroy): Need to revisit this and see what guarantees exist between
177 // C-core and the internal-metadata subsystem. E.g. can we ensure a particular
178 // metadata is never added twice, even in the presence of user supplied data?
179 grpc_error* grpc_metadata_batch_link_head(
180 grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
181 grpc_metadata_batch_callouts_index idx) {
182 GPR_DEBUG_ASSERT(GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md)) == idx);
183 assert_valid_callouts(batch);
184 grpc_error* err = link_callout(batch, storage, idx);
185 if (GPR_UNLIKELY(err != GRPC_ERROR_NONE)) {
186 assert_valid_callouts(batch);
189 link_head(&batch->list, storage);
190 assert_valid_callouts(batch);
191 return GRPC_ERROR_NONE;
194 grpc_error* grpc_metadata_batch_add_tail(grpc_metadata_batch* batch,
195 grpc_linked_mdelem* storage,
196 grpc_mdelem elem_to_add) {
197 GPR_DEBUG_ASSERT(!GRPC_MDISNULL(elem_to_add));
198 storage->md = elem_to_add;
199 return grpc_metadata_batch_link_tail(batch, storage);
202 static void link_tail(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
203 assert_valid_list(list);
204 GPR_DEBUG_ASSERT(!GRPC_MDISNULL(storage->md));
205 storage->prev = list->tail;
206 storage->next = nullptr;
207 storage->reserved = nullptr;
208 if (list->tail != nullptr) {
209 list->tail->next = storage;
211 list->head = storage;
213 list->tail = storage;
215 assert_valid_list(list);
218 grpc_error* grpc_metadata_batch_link_tail(grpc_metadata_batch* batch,
219 grpc_linked_mdelem* storage) {
220 assert_valid_callouts(batch);
221 grpc_error* err = maybe_link_callout(batch, storage);
222 if (err != GRPC_ERROR_NONE) {
223 assert_valid_callouts(batch);
226 link_tail(&batch->list, storage);
227 assert_valid_callouts(batch);
228 return GRPC_ERROR_NONE;
231 grpc_error* grpc_metadata_batch_link_tail(
232 grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
233 grpc_metadata_batch_callouts_index idx) {
234 GPR_DEBUG_ASSERT(GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md)) == idx);
235 assert_valid_callouts(batch);
236 grpc_error* err = link_callout(batch, storage, idx);
237 if (GPR_UNLIKELY(err != GRPC_ERROR_NONE)) {
238 assert_valid_callouts(batch);
241 link_tail(&batch->list, storage);
242 assert_valid_callouts(batch);
243 return GRPC_ERROR_NONE;
246 static void unlink_storage(grpc_mdelem_list* list,
247 grpc_linked_mdelem* storage) {
248 assert_valid_list(list);
249 if (storage->prev != nullptr) {
250 storage->prev->next = storage->next;
252 list->head = storage->next;
254 if (storage->next != nullptr) {
255 storage->next->prev = storage->prev;
257 list->tail = storage->prev;
260 assert_valid_list(list);
263 void grpc_metadata_batch_remove(grpc_metadata_batch* batch,
264 grpc_linked_mdelem* storage) {
265 assert_valid_callouts(batch);
266 maybe_unlink_callout(batch, storage);
267 unlink_storage(&batch->list, storage);
268 GRPC_MDELEM_UNREF(storage->md);
269 assert_valid_callouts(batch);
272 void grpc_metadata_batch_remove(grpc_metadata_batch* batch,
273 grpc_metadata_batch_callouts_index idx) {
274 assert_valid_callouts(batch);
275 grpc_linked_mdelem* storage = batch->idx.array[idx];
276 GPR_DEBUG_ASSERT(storage != nullptr);
277 --batch->list.default_count;
278 batch->idx.array[idx] = nullptr;
279 unlink_storage(&batch->list, storage);
280 GRPC_MDELEM_UNREF(storage->md);
281 assert_valid_callouts(batch);
284 void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
285 const grpc_slice& value) {
286 grpc_mdelem old_mdelem = storage->md;
287 grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
288 grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
289 storage->md = new_mdelem;
290 GRPC_MDELEM_UNREF(old_mdelem);
293 grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch,
294 grpc_linked_mdelem* storage,
295 grpc_mdelem new_mdelem) {
296 assert_valid_callouts(batch);
297 grpc_error* error = GRPC_ERROR_NONE;
298 grpc_mdelem old_mdelem = storage->md;
299 if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
300 maybe_unlink_callout(batch, storage);
301 storage->md = new_mdelem;
302 error = maybe_link_callout(batch, storage);
303 if (error != GRPC_ERROR_NONE) {
304 unlink_storage(&batch->list, storage);
305 GRPC_MDELEM_UNREF(storage->md);
308 storage->md = new_mdelem;
310 GRPC_MDELEM_UNREF(old_mdelem);
311 assert_valid_callouts(batch);
315 void grpc_metadata_batch_clear(grpc_metadata_batch* batch) {
316 grpc_metadata_batch_destroy(batch);
317 grpc_metadata_batch_init(batch);
320 bool grpc_metadata_batch_is_empty(grpc_metadata_batch* batch) {
321 return batch->list.head == nullptr &&
322 batch->deadline == GRPC_MILLIS_INF_FUTURE;
325 size_t grpc_metadata_batch_size(grpc_metadata_batch* batch) {
327 for (grpc_linked_mdelem* elem = batch->list.head; elem != nullptr;
329 size += GRPC_MDELEM_LENGTH(elem->md);
334 static void add_error(grpc_error** composite, grpc_error* error,
335 const char* composite_error_string) {
336 if (error == GRPC_ERROR_NONE) return;
337 if (*composite == GRPC_ERROR_NONE) {
338 *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(composite_error_string);
340 *composite = grpc_error_add_child(*composite, error);
343 grpc_error* grpc_metadata_batch_filter(grpc_metadata_batch* batch,
344 grpc_metadata_batch_filter_func func,
346 const char* composite_error_string) {
347 grpc_linked_mdelem* l = batch->list.head;
348 grpc_error* error = GRPC_ERROR_NONE;
350 grpc_linked_mdelem* next = l->next;
351 grpc_filtered_mdelem new_mdelem = func(user_data, l->md);
352 add_error(&error, new_mdelem.error, composite_error_string);
353 if (GRPC_MDISNULL(new_mdelem.md)) {
354 grpc_metadata_batch_remove(batch, l);
355 } else if (new_mdelem.md.payload != l->md.payload) {
356 grpc_metadata_batch_substitute(batch, l, new_mdelem.md);
363 void grpc_metadata_batch_copy(grpc_metadata_batch* src,
364 grpc_metadata_batch* dst,
365 grpc_linked_mdelem* storage) {
366 grpc_metadata_batch_init(dst);
367 dst->deadline = src->deadline;
369 for (grpc_linked_mdelem* elem = src->list.head; elem != nullptr;
371 // Error unused in non-debug builds.
372 grpc_error* GRPC_UNUSED error = grpc_metadata_batch_add_tail(
373 dst, &storage[i++], GRPC_MDELEM_REF(elem->md));
374 // The only way that grpc_metadata_batch_add_tail() can fail is if
375 // there's a duplicate entry for a callout. However, that can't be
376 // the case here, because we would not have been allowed to create
377 // a source batch that had that kind of conflict.
378 GPR_DEBUG_ASSERT(error == GRPC_ERROR_NONE);
382 void grpc_metadata_batch_move(grpc_metadata_batch* src,
383 grpc_metadata_batch* dst) {
385 grpc_metadata_batch_init(src);