/*
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include <grpc/support/port_platform.h>
21 #include "src/core/lib/transport/metadata.h"
28 #include <grpc/compression.h>
29 #include <grpc/grpc.h>
30 #include <grpc/support/alloc.h>
31 #include <grpc/support/atm.h>
32 #include <grpc/support/log.h>
33 #include <grpc/support/string_util.h>
34 #include <grpc/support/time.h>
36 #include "src/core/lib/gpr/murmur_hash.h"
37 #include "src/core/lib/gpr/string.h"
38 #include "src/core/lib/iomgr/iomgr_internal.h"
39 #include "src/core/lib/profiling/timers.h"
40 #include "src/core/lib/slice/slice_internal.h"
41 #include "src/core/lib/slice/slice_string_helpers.h"
42 #include "src/core/lib/transport/static_metadata.h"
44 using grpc_core::AllocatedMetadata;
45 using grpc_core::InternedMetadata;
46 using grpc_core::StaticMetadata;
47 using grpc_core::UserData;
/* There are two kinds of mdelem and mdstr instances.
 * Static instances are declared in static_metadata.{h,c} and
 * are initialized by grpc_mdctx_global_init().
 * Dynamic instances are stored in hash tables on grpc_mdctx, and are backed
 * by internal_string and internal_element structures.
 * Internal helper functions here-in (is_mdstr_static, is_mdelem_static) are
 * used to determine which kind of element a pointer refers to.
 */
58 grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata");
61 #define DEBUG_ARGS , const char *file, int line
62 #define FWD_DEBUG_ARGS file, line
64 void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
65 const grpc_slice& value, intptr_t refcnt,
66 const char* file, int line) {
67 if (grpc_trace_metadata.enabled()) {
68 char* key_str = grpc_slice_to_c_string(key);
69 char* value_str = grpc_slice_to_c_string(value);
70 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
71 "mdelem REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
72 refcnt, refcnt + 1, key_str, value_str);
78 void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
79 const grpc_slice& value, intptr_t refcnt,
80 const char* file, int line) {
81 if (grpc_trace_metadata.enabled()) {
82 char* key_str = grpc_slice_to_c_string(key);
83 char* value_str = grpc_slice_to_c_string(value);
84 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
85 "mdelem UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
86 refcnt, refcnt - 1, key_str, value_str);
92 #else // ifndef NDEBUG
94 #define FWD_DEBUG_ARGS
95 #endif // ifndef NDEBUG
// Interned-metadata hash table sizing: SHARD_COUNT independent shards, each
// starting with INITIAL_SHARD_CAPACITY buckets.
#define INITIAL_SHARD_CAPACITY 8
#define LOG2_SHARD_COUNT 4
#define SHARD_COUNT ((size_t)(1 << LOG2_SHARD_COUNT))

// The low LOG2_SHARD_COUNT bits of the hash select the shard; the remaining
// high bits select the bucket within that shard's table.
#define TABLE_IDX(hash, capacity) (((hash) >> (LOG2_SHARD_COUNT)) % (capacity))
#define SHARD_IDX(hash) ((hash) & ((1 << (LOG2_SHARD_COUNT)) - 1))
104 void StaticMetadata::HashInit() {
105 uint32_t k_hash = grpc_slice_hash_internal(kv_.key);
106 uint32_t v_hash = grpc_slice_hash_internal(kv_.value);
107 hash_ = GRPC_MDSTR_KV_HASH(k_hash, v_hash);
110 AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
111 const grpc_slice& value)
112 : RefcountedMdBase(grpc_slice_ref_internal(key),
113 grpc_slice_ref_internal(value)) {
115 TraceAtStart("ALLOC_MD");
119 AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
120 const grpc_slice& value, const NoRefKey*)
121 : RefcountedMdBase(key, grpc_slice_ref_internal(value)) {
123 TraceAtStart("ALLOC_MD_NOREF_KEY");
127 AllocatedMetadata::AllocatedMetadata(
128 const grpc_core::ManagedMemorySlice& key,
129 const grpc_core::UnmanagedMemorySlice& value)
130 : RefcountedMdBase(key, value) {
132 TraceAtStart("ALLOC_MD_NOREF_KEY_VAL");
136 AllocatedMetadata::AllocatedMetadata(
137 const grpc_core::ExternallyManagedSlice& key,
138 const grpc_core::UnmanagedMemorySlice& value)
139 : RefcountedMdBase(key, value) {
141 TraceAtStart("ALLOC_MD_NOREF_KEY_VAL");
145 AllocatedMetadata::~AllocatedMetadata() {
146 grpc_slice_unref_internal(key());
147 grpc_slice_unref_internal(value());
148 void* user_data = user_data_.data.Load(grpc_core::MemoryOrder::RELAXED);
150 destroy_user_data_func destroy_user_data =
151 user_data_.destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED);
152 destroy_user_data(user_data);
157 void grpc_core::RefcountedMdBase::TraceAtStart(const char* tag) {
158 if (grpc_trace_metadata.enabled()) {
159 char* key_str = grpc_slice_to_c_string(key());
160 char* value_str = grpc_slice_to_c_string(value());
161 gpr_log(GPR_DEBUG, "mdelem %s:%p:%" PRIdPTR ": '%s' = '%s'", tag, this,
162 RefValue(), key_str, value_str);
169 InternedMetadata::InternedMetadata(const grpc_slice& key,
170 const grpc_slice& value, uint32_t hash,
171 InternedMetadata* next)
172 : RefcountedMdBase(grpc_slice_ref_internal(key),
173 grpc_slice_ref_internal(value), hash),
176 TraceAtStart("INTERNED_MD");
180 InternedMetadata::InternedMetadata(const grpc_slice& key,
181 const grpc_slice& value, uint32_t hash,
182 InternedMetadata* next, const NoRefKey*)
183 : RefcountedMdBase(key, grpc_slice_ref_internal(value), hash), link_(next) {
185 TraceAtStart("INTERNED_MD_NOREF_KEY");
189 InternedMetadata::~InternedMetadata() {
190 grpc_slice_unref_internal(key());
191 grpc_slice_unref_internal(value());
192 void* user_data = user_data_.data.Load(grpc_core::MemoryOrder::RELAXED);
194 destroy_user_data_func destroy_user_data =
195 user_data_.destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED);
196 destroy_user_data(user_data);
200 size_t InternedMetadata::CleanupLinkedMetadata(
201 InternedMetadata::BucketLink* head) {
202 size_t num_freed = 0;
203 InternedMetadata::BucketLink* prev_next = head;
204 InternedMetadata *md, *next;
206 for (md = head->next; md; md = next) {
207 next = md->link_.next;
208 if (md->AllRefsDropped()) {
209 prev_next->next = next;
210 grpc_core::Delete(md);
213 prev_next = &md->link_;
219 typedef struct mdtab_shard {
221 InternedMetadata::BucketLink* elems;
224 /** Estimate of the number of unreferenced mdelems in the hash table.
225 This will eventually converge to the exact number, but it's instantaneous
226 accuracy is not guaranteed */
227 gpr_atm free_estimate;
230 static mdtab_shard g_shards[SHARD_COUNT];
232 static void gc_mdtab(mdtab_shard* shard);
234 void grpc_mdctx_global_init(void) {
235 /* initialize shards */
236 for (size_t i = 0; i < SHARD_COUNT; i++) {
237 mdtab_shard* shard = &g_shards[i];
238 gpr_mu_init(&shard->mu);
240 gpr_atm_no_barrier_store(&shard->free_estimate, 0);
241 shard->capacity = INITIAL_SHARD_CAPACITY;
242 shard->elems = static_cast<InternedMetadata::BucketLink*>(
243 gpr_zalloc(sizeof(*shard->elems) * shard->capacity));
247 void grpc_mdctx_global_shutdown() {
248 for (size_t i = 0; i < SHARD_COUNT; i++) {
249 mdtab_shard* shard = &g_shards[i];
250 gpr_mu_destroy(&shard->mu);
252 if (shard->count != 0) {
253 gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata elements were leaked",
255 if (grpc_iomgr_abort_on_leaks()) {
259 // For ASAN builds, we don't want to crash here, because that will
260 // prevent ASAN from providing leak detection information, which is
261 // far more useful than this simple assertion.
262 #ifndef GRPC_ASAN_ENABLED
263 GPR_DEBUG_ASSERT(shard->count == 0);
265 gpr_free(shard->elems);
270 static int is_mdelem_static(grpc_mdelem e) {
271 return reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(e)) >=
272 &grpc_static_mdelem_table()[0] &&
273 reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(e)) <
274 &grpc_static_mdelem_table()[GRPC_STATIC_MDELEM_COUNT];
278 void InternedMetadata::RefWithShardLocked(mdtab_shard* shard) {
280 if (grpc_trace_metadata.enabled()) {
281 char* key_str = grpc_slice_to_c_string(key());
282 char* value_str = grpc_slice_to_c_string(value());
283 intptr_t value = RefValue();
284 gpr_log(__FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG,
285 "mdelem REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", this,
286 value, value + 1, key_str, value_str);
292 gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -1);
296 static void gc_mdtab(mdtab_shard* shard) {
297 GPR_TIMER_SCOPE("gc_mdtab", 0);
298 size_t num_freed = 0;
299 for (size_t i = 0; i < shard->capacity; ++i) {
300 intptr_t freed = InternedMetadata::CleanupLinkedMetadata(&shard->elems[i]);
302 shard->count -= freed;
304 gpr_atm_no_barrier_fetch_add(&shard->free_estimate,
305 -static_cast<intptr_t>(num_freed));
308 static void grow_mdtab(mdtab_shard* shard) {
309 GPR_TIMER_SCOPE("grow_mdtab", 0);
311 size_t capacity = shard->capacity * 2;
313 InternedMetadata::BucketLink* mdtab;
314 InternedMetadata *md, *next;
317 mdtab = static_cast<InternedMetadata::BucketLink*>(
318 gpr_zalloc(sizeof(InternedMetadata::BucketLink) * capacity));
320 for (i = 0; i < shard->capacity; i++) {
321 for (md = shard->elems[i].next; md; md = next) {
324 next = md->bucket_next();
325 idx = TABLE_IDX(hash, capacity);
326 md->set_bucket_next(mdtab[idx].next);
327 mdtab[idx].next = md;
330 gpr_free(shard->elems);
331 shard->elems = mdtab;
332 shard->capacity = capacity;
335 static void rehash_mdtab(mdtab_shard* shard) {
336 if (gpr_atm_no_barrier_load(&shard->free_estimate) >
337 static_cast<gpr_atm>(shard->capacity / 4)) {
344 template <bool key_definitely_static, bool value_definitely_static = false>
345 static grpc_mdelem md_create_maybe_static(const grpc_slice& key,
346 const grpc_slice& value);
347 template <bool key_definitely_static>
348 static grpc_mdelem md_create_must_intern(const grpc_slice& key,
349 const grpc_slice& value,
352 template <bool key_definitely_static, bool value_definitely_static = false>
353 static grpc_mdelem md_create(
354 const grpc_slice& key, const grpc_slice& value,
355 grpc_mdelem_data* compatible_external_backing_store) {
356 // Ensure slices are, in fact, static if we claimed they were.
357 GPR_DEBUG_ASSERT(!key_definitely_static ||
358 GRPC_IS_STATIC_METADATA_STRING(key));
359 GPR_DEBUG_ASSERT(!value_definitely_static ||
360 GRPC_IS_STATIC_METADATA_STRING(value));
361 const bool key_is_interned =
362 key_definitely_static || grpc_slice_is_interned(key);
363 const bool value_is_interned =
364 value_definitely_static || grpc_slice_is_interned(value);
365 // External storage if either slice is not interned and the caller already
366 // created a backing store. If no backing store, we allocate one.
367 if (!key_is_interned || !value_is_interned) {
368 if (compatible_external_backing_store != nullptr) {
369 // Caller provided backing store.
370 return GRPC_MAKE_MDELEM(compatible_external_backing_store,
371 GRPC_MDELEM_STORAGE_EXTERNAL);
373 // We allocate backing store.
374 return key_definitely_static
376 grpc_core::New<AllocatedMetadata>(
378 static_cast<const AllocatedMetadata::NoRefKey*>(
380 GRPC_MDELEM_STORAGE_ALLOCATED)
382 grpc_core::New<AllocatedMetadata>(key, value),
383 GRPC_MDELEM_STORAGE_ALLOCATED);
386 return md_create_maybe_static<key_definitely_static, value_definitely_static>(
390 template <bool key_definitely_static, bool value_definitely_static>
391 static grpc_mdelem md_create_maybe_static(const grpc_slice& key,
392 const grpc_slice& value) {
393 // Ensure slices are, in fact, static if we claimed they were.
394 GPR_DEBUG_ASSERT(!key_definitely_static ||
395 GRPC_IS_STATIC_METADATA_STRING(key));
396 GPR_DEBUG_ASSERT(!value_definitely_static ||
397 GRPC_IS_STATIC_METADATA_STRING(value));
398 GPR_DEBUG_ASSERT(key.refcount != nullptr);
399 GPR_DEBUG_ASSERT(value.refcount != nullptr);
401 const bool key_is_static_mdstr =
402 key_definitely_static ||
403 key.refcount->GetType() == grpc_slice_refcount::Type::STATIC;
404 const bool value_is_static_mdstr =
405 value_definitely_static ||
406 value.refcount->GetType() == grpc_slice_refcount::Type::STATIC;
408 const intptr_t kidx = GRPC_STATIC_METADATA_INDEX(key);
410 // Not all static slice input yields a statically stored metadata element.
411 if (key_is_static_mdstr && value_is_static_mdstr) {
412 grpc_mdelem static_elem = grpc_static_mdelem_for_static_strings(
413 kidx, GRPC_STATIC_METADATA_INDEX(value));
414 if (!GRPC_MDISNULL(static_elem)) {
419 uint32_t khash = key_definitely_static
420 ? grpc_static_metadata_hash_values[kidx]
421 : grpc_slice_hash_refcounted(key);
423 uint32_t hash = GRPC_MDSTR_KV_HASH(khash, grpc_slice_hash_refcounted(value));
424 return md_create_must_intern<key_definitely_static>(key, value, hash);
427 template <bool key_definitely_static>
428 static grpc_mdelem md_create_must_intern(const grpc_slice& key,
429 const grpc_slice& value,
431 // Here, we know both key and value are both at least interned, and both
432 // possibly static. We know that anything inside the shared interned table is
433 // also at least interned (and maybe static). Note that equality for a static
434 // and interned slice implies that they are both the same exact slice.
435 // The same applies to a pair of interned slices, or a pair of static slices.
436 // Rather than run the full equality check, we can therefore just do a pointer
437 // comparison of the refcounts.
438 InternedMetadata* md;
439 mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
442 GPR_TIMER_SCOPE("grpc_mdelem_from_metadata_strings", 0);
444 gpr_mu_lock(&shard->mu);
446 idx = TABLE_IDX(hash, shard->capacity);
447 /* search for an existing pair */
448 for (md = shard->elems[idx].next; md; md = md->bucket_next()) {
449 if (grpc_slice_static_interned_equal(key, md->key()) &&
450 grpc_slice_static_interned_equal(value, md->value())) {
451 md->RefWithShardLocked(shard);
452 gpr_mu_unlock(&shard->mu);
453 return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
457 /* not found: create a new pair */
458 md = key_definitely_static
459 ? grpc_core::New<InternedMetadata>(
460 key, value, hash, shard->elems[idx].next,
461 static_cast<const InternedMetadata::NoRefKey*>(nullptr))
462 : grpc_core::New<InternedMetadata>(key, value, hash,
463 shard->elems[idx].next);
464 shard->elems[idx].next = md;
467 if (shard->count > shard->capacity * 2) {
471 gpr_mu_unlock(&shard->mu);
473 return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
476 grpc_mdelem grpc_mdelem_create(
477 const grpc_slice& key, const grpc_slice& value,
478 grpc_mdelem_data* compatible_external_backing_store) {
479 return md_create<false>(key, value, compatible_external_backing_store);
482 grpc_mdelem grpc_mdelem_create(
483 const grpc_core::StaticMetadataSlice& key, const grpc_slice& value,
484 grpc_mdelem_data* compatible_external_backing_store) {
485 return md_create<true>(key, value, compatible_external_backing_store);
488 /* Create grpc_mdelem from provided slices. We specify via template parameter
489 whether we know that the input key is static or not. If it is, we short
490 circuit various comparisons and a no-op unref. */
491 template <bool key_definitely_static>
492 static grpc_mdelem md_from_slices(const grpc_slice& key,
493 const grpc_slice& value) {
494 // Ensure key is, in fact, static if we claimed it was.
495 GPR_DEBUG_ASSERT(!key_definitely_static ||
496 GRPC_IS_STATIC_METADATA_STRING(key));
497 grpc_mdelem out = md_create<key_definitely_static>(key, value, nullptr);
498 if (!key_definitely_static) {
499 grpc_slice_unref_internal(key);
501 grpc_slice_unref_internal(value);
505 grpc_mdelem grpc_mdelem_from_slices(const grpc_slice& key,
506 const grpc_slice& value) {
507 return md_from_slices</*key_definitely_static=*/false>(key, value);
510 grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
511 const grpc_slice& value) {
512 return md_from_slices</*key_definitely_static=*/true>(key, value);
515 grpc_mdelem grpc_mdelem_from_slices(
516 const grpc_core::StaticMetadataSlice& key,
517 const grpc_core::StaticMetadataSlice& value) {
518 grpc_mdelem out = md_create_maybe_static<true, true>(key, value);
522 grpc_mdelem grpc_mdelem_from_slices(
523 const grpc_core::StaticMetadataSlice& key,
524 const grpc_core::ManagedMemorySlice& value) {
525 // TODO(arjunroy): We can save the unref if md_create_maybe_static ended up
526 // creating a new interned metadata. But otherwise - we need this here.
527 grpc_mdelem out = md_create_maybe_static<true>(key, value);
528 grpc_slice_unref_internal(value);
532 grpc_mdelem grpc_mdelem_from_slices(
533 const grpc_core::ManagedMemorySlice& key,
534 const grpc_core::ManagedMemorySlice& value) {
535 grpc_mdelem out = md_create_maybe_static<false>(key, value);
536 // TODO(arjunroy): We can save the unref if md_create_maybe_static ended up
537 // creating a new interned metadata. But otherwise - we need this here.
538 grpc_slice_unref_internal(key);
539 grpc_slice_unref_internal(value);
543 grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata) {
544 bool changed = false;
545 grpc_slice key_slice =
546 grpc_slice_maybe_static_intern(metadata->key, &changed);
547 grpc_slice value_slice =
548 grpc_slice_maybe_static_intern(metadata->value, &changed);
549 return grpc_mdelem_create(
550 key_slice, value_slice,
551 changed ? nullptr : reinterpret_cast<grpc_mdelem_data*>(metadata));
554 static void* get_user_data(UserData* user_data, void (*destroy_func)(void*)) {
555 if (user_data->destroy_user_data.Load(grpc_core::MemoryOrder::ACQUIRE) ==
557 return user_data->data.Load(grpc_core::MemoryOrder::RELAXED);
563 void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) {
564 switch (GRPC_MDELEM_STORAGE(md)) {
565 case GRPC_MDELEM_STORAGE_EXTERNAL:
567 case GRPC_MDELEM_STORAGE_STATIC:
568 return reinterpret_cast<void*>(
569 grpc_static_mdelem_user_data
570 [reinterpret_cast<grpc_core::StaticMetadata*>(
571 GRPC_MDELEM_DATA(md)) -
572 grpc_static_mdelem_table()]);
573 case GRPC_MDELEM_STORAGE_ALLOCATED: {
574 auto* am = reinterpret_cast<AllocatedMetadata*>(GRPC_MDELEM_DATA(md));
575 return get_user_data(am->user_data(), destroy_func);
577 case GRPC_MDELEM_STORAGE_INTERNED: {
578 auto* im = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(md);
579 return get_user_data(im->user_data(), destroy_func);
582 GPR_UNREACHABLE_CODE(return nullptr);
585 static void* set_user_data(UserData* ud, void (*destroy_func)(void*),
587 GPR_ASSERT((data == nullptr) == (destroy_func == nullptr));
588 grpc_core::ReleasableMutexLock lock(&ud->mu_user_data);
589 if (ud->destroy_user_data.Load(grpc_core::MemoryOrder::RELAXED)) {
590 /* user data can only be set once */
592 if (destroy_func != nullptr) {
595 return ud->data.Load(grpc_core::MemoryOrder::RELAXED);
597 ud->data.Store(data, grpc_core::MemoryOrder::RELAXED);
598 ud->destroy_user_data.Store(destroy_func, grpc_core::MemoryOrder::RELEASE);
602 void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
604 switch (GRPC_MDELEM_STORAGE(md)) {
605 case GRPC_MDELEM_STORAGE_EXTERNAL:
608 case GRPC_MDELEM_STORAGE_STATIC:
610 return reinterpret_cast<void*>(
611 grpc_static_mdelem_user_data
612 [reinterpret_cast<grpc_core::StaticMetadata*>(
613 GRPC_MDELEM_DATA(md)) -
614 grpc_static_mdelem_table()]);
615 case GRPC_MDELEM_STORAGE_ALLOCATED: {
616 auto* am = reinterpret_cast<AllocatedMetadata*>(GRPC_MDELEM_DATA(md));
617 return set_user_data(am->user_data(), destroy_func, data);
619 case GRPC_MDELEM_STORAGE_INTERNED: {
620 auto* im = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(md);
621 GPR_DEBUG_ASSERT(!is_mdelem_static(md));
622 return set_user_data(im->user_data(), destroy_func, data);
625 GPR_UNREACHABLE_CODE(return nullptr);
628 bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b) {
629 if (a.payload == b.payload) return true;
630 if (GRPC_MDELEM_IS_INTERNED(a) && GRPC_MDELEM_IS_INTERNED(b)) return false;
631 if (GRPC_MDISNULL(a) || GRPC_MDISNULL(b)) return false;
632 return grpc_slice_eq(GRPC_MDKEY(a), GRPC_MDKEY(b)) &&
633 grpc_slice_eq(GRPC_MDVALUE(a), GRPC_MDVALUE(b));
636 static void note_disposed_interned_metadata(uint32_t hash) {
637 mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
638 gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1);
641 void grpc_mdelem_do_unref(grpc_mdelem gmd DEBUG_ARGS) {
642 switch (GRPC_MDELEM_STORAGE(gmd)) {
643 case GRPC_MDELEM_STORAGE_EXTERNAL:
644 case GRPC_MDELEM_STORAGE_STATIC:
646 case GRPC_MDELEM_STORAGE_INTERNED: {
647 auto* md = reinterpret_cast<InternedMetadata*> GRPC_MDELEM_DATA(gmd);
648 uint32_t hash = md->hash();
649 if (GPR_UNLIKELY(md->Unref(FWD_DEBUG_ARGS))) {
650 /* once the refcount hits zero, some other thread can come along and
651 free md at any time: it's unsafe from this point on to access it */
652 note_disposed_interned_metadata(hash);
656 case GRPC_MDELEM_STORAGE_ALLOCATED: {
657 auto* md = reinterpret_cast<AllocatedMetadata*> GRPC_MDELEM_DATA(gmd);
658 if (GPR_UNLIKELY(md->Unref(FWD_DEBUG_ARGS))) {
659 grpc_core::Delete(md);
666 void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
667 uint32_t hash DEBUG_ARGS) {
669 case GRPC_MDELEM_STORAGE_EXTERNAL:
670 case GRPC_MDELEM_STORAGE_STATIC:
672 case GRPC_MDELEM_STORAGE_INTERNED: {
673 note_disposed_interned_metadata(hash);
676 case GRPC_MDELEM_STORAGE_ALLOCATED: {
677 grpc_core::Delete(reinterpret_cast<AllocatedMetadata*>(ptr));