3 * Copyright 2015 gRPC authors.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
19 #ifndef GRPC_CORE_LIB_TRANSPORT_METADATA_H
20 #define GRPC_CORE_LIB_TRANSPORT_METADATA_H
22 #include <grpc/support/port_platform.h>
24 #include <grpc/impl/codegen/log.h>
26 #include <grpc/grpc.h>
27 #include <grpc/slice.h>
29 #include "src/core/lib/debug/trace.h"
30 #include "src/core/lib/gpr/useful.h"
31 #include "src/core/lib/gprpp/atomic.h"
32 #include "src/core/lib/gprpp/sync.h"
33 #include "src/core/lib/slice/slice_utils.h"
35 extern grpc_core::DebugOnlyTraceFlag grpc_trace_metadata;
37 /* This file provides a mechanism for tracking metadata through the grpc stack.
38 It's not intended for consumption outside of the library.
40 Metadata is tracked in the context of a grpc_mdctx. For the time being there
41 is one of these per-channel, avoiding cross channel interference with memory
42 use and lock contention.
44 The context tracks unique strings (grpc_mdstr) and pairs of strings
45 (grpc_mdelem). Any of these objects can be checked for equality by comparing
46 their pointers. These objects are reference counted.
48 grpc_mdelem can additionally store a (non-NULL) user data pointer. This
49 pointer is intended to be used to cache semantic meaning of a metadata
50 element. For example, an OAuth token may cache the credentials it represents
51 and the time at which it expires in the mdelem user data.
53 Combining this metadata cache and the hpack compression table allows us to
54 simply lookup complete preparsed objects quickly, incurring a few atomic
55 ops per metadata element on the fast path.
   grpc_mdelem instances MAY live longer than their refcount implies, and are
   garbage collected periodically, meaning cached data can easily outlive a
   single request.
61 STATIC METADATA: in static_metadata.h we declare a set of static metadata.
62 These mdelems and mdstrs are available via pre-declared code generated macros
63 and are available to code anywhere between grpc_init() and grpc_shutdown().
64 They are not refcounted, but can be passed to _ref and _unref functions
65 declared here - in which case those functions are effectively no-ops. */
/* Forward declarations */
typedef struct grpc_mdelem grpc_mdelem;
70 /* if changing this, make identical changes in:
71 - grpc_core::{InternedMetadata, AllocatedMetadata}
72 - grpc_metadata in grpc_types.h */
73 typedef struct grpc_mdelem_data {
75 const grpc_slice value;
76 /* there is a private part to this in metadata.c */
/* GRPC_MDELEM_STORAGE_* enum values that can be treated as interned always have
   this bit set in their integer value */
#define GRPC_MDELEM_STORAGE_INTERNED_BIT 1

/* External and static storage metadata has no refcount to ref/unref. Allocated
 * and interned metadata do have a refcount. Metadata ref and unref methods use
 * a switch statement on this enum to determine which behaviour to execute.
 * Keeping the no-ref cases together and the ref-cases together leads to
 * slightly better code generation (9 inlined instructions rather than 10). */
typedef enum grpc_mdelem_data_storage {
  /* memory pointed to by grpc_mdelem::payload is owned by an external system */
  GRPC_MDELEM_STORAGE_EXTERNAL = 0,
  /* memory is in the static metadata table */
  GRPC_MDELEM_STORAGE_STATIC = GRPC_MDELEM_STORAGE_INTERNED_BIT,
  /* memory pointed to by grpc_mdelem::payload is allocated by the metadata
     system */
  GRPC_MDELEM_STORAGE_ALLOCATED = 2,
  /* memory pointed to by grpc_mdelem::payload is interned by the metadata
     system */
  GRPC_MDELEM_STORAGE_INTERNED = 2 | GRPC_MDELEM_STORAGE_INTERNED_BIT,
} grpc_mdelem_data_storage;
/* a grpc_mdelem_data* generally, with the two lower bits signalling memory
   ownership as per grpc_mdelem_data_storage */
struct grpc_mdelem {
  /* NOTE(review): this struct body was missing from this copy; the `payload`
     member name is grounded by the (md).payload uses in the macros below. */
  uintptr_t payload;
};

/* Strip the two low tag bits to recover the grpc_mdelem_data pointer. */
#define GRPC_MDELEM_DATA(md) ((grpc_mdelem_data*)((md).payload & ~(uintptr_t)3))
/* Extract the storage class from the two low tag bits. */
#define GRPC_MDELEM_STORAGE(md) \
  ((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
#ifdef __cplusplus
/* C++ spelling: braced aggregate initialization of grpc_mdelem. */
#define GRPC_MAKE_MDELEM(data, storage) \
  (grpc_mdelem{((uintptr_t)(data)) | ((uintptr_t)storage)})
#else  // __cplusplus
/* C spelling: compound literal. */
#define GRPC_MAKE_MDELEM(data, storage) \
  ((grpc_mdelem){((uintptr_t)(data)) | ((uintptr_t)storage)})
#endif  // __cplusplus
/* Non-zero iff the storage class has the interned bit set. */
#define GRPC_MDELEM_IS_INTERNED(md) \
  ((grpc_mdelem_data_storage)((md).payload & \
   (uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
121 /* Given arbitrary input slices, create a grpc_mdelem object. The caller refs
122 * the input slices; we unref them. This method is always safe to call; however,
123 * if we know data about the slices in question (e.g. if we knew our key was
124 * static) we can call specializations that save on cycle count. */
125 grpc_mdelem grpc_mdelem_from_slices(const grpc_slice& key,
126 const grpc_slice& value);
128 /* Like grpc_mdelem_from_slices, but we know that key is a static slice. This
129 saves us a few branches and a no-op call to md_unref() for the key. */
130 grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
131 const grpc_slice& value);
133 /* Like grpc_mdelem_from_slices, but key is static and val is static. */
134 grpc_mdelem grpc_mdelem_from_slices(
135 const grpc_core::StaticMetadataSlice& key,
136 const grpc_core::StaticMetadataSlice& value);
138 /* Like grpc_mdelem_from_slices, but key is static and val is interned. */
139 grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
140 const grpc_core::ManagedMemorySlice& value);
142 /* Like grpc_mdelem_from_slices, but key and val are interned. */
143 grpc_mdelem grpc_mdelem_from_slices(const grpc_core::ManagedMemorySlice& key,
144 const grpc_core::ManagedMemorySlice& value);
146 /* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata
147 object as backing storage (so lifetimes should align) */
148 grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata);
150 /* Does not unref the slices; if a new non-interned mdelem is needed, allocates
151 one if compatible_external_backing_store is NULL, or uses
152 compatible_external_backing_store if it is non-NULL (in which case it's the
153 users responsibility to ensure that it outlives usage) */
154 grpc_mdelem grpc_mdelem_create(
155 const grpc_slice& key, const grpc_slice& value,
156 grpc_mdelem_data* compatible_external_backing_store);
158 /* Like grpc_mdelem_create, but we know that key is static. */
159 grpc_mdelem grpc_mdelem_create(
160 const grpc_core::StaticMetadataSlice& key, const grpc_slice& value,
161 grpc_mdelem_data* compatible_external_backing_store);
163 #define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key)
164 #define GRPC_MDVALUE(md) (GRPC_MDELEM_DATA(md)->value)
166 bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b);
167 /* Often we compare metadata where we know a-priori that the second parameter is
168 * static, and that the keys match. This most commonly happens when processing
169 * metadata batch callouts in initial/trailing filters. In this case, fastpath
170 * grpc_mdelem_eq and remove unnecessary checks. */
171 inline bool grpc_mdelem_static_value_eq(grpc_mdelem a, grpc_mdelem b_static) {
172 if (a.payload == b_static.payload) return true;
173 return grpc_slice_eq_static_interned(GRPC_MDVALUE(a), GRPC_MDVALUE(b_static));
175 #define GRPC_MDISNULL(md) (GRPC_MDELEM_DATA(md) == NULL)
177 inline bool grpc_mdelem_both_interned_eq(grpc_mdelem a_interned,
178 grpc_mdelem b_interned) {
179 GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(a_interned) ||
180 GRPC_MDISNULL(a_interned));
181 GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(b_interned) ||
182 GRPC_MDISNULL(b_interned));
183 return a_interned.payload == b_interned.payload;
186 /* Mutator and accessor for grpc_mdelem user data. The destructor function
187 is used as a type tag and is checked during user_data fetch. */
188 void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*));
189 void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
192 // Defined in metadata.cc.
196 void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
197 const grpc_slice& value, intptr_t refcnt,
198 const char* file, int line);
199 void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
200 const grpc_slice& value, intptr_t refcnt,
201 const char* file, int line);
203 namespace grpc_core {
205 typedef void (*destroy_user_data_func)(void* data);
209 grpc_core::Atomic<destroy_user_data_func> destroy_user_data;
210 grpc_core::Atomic<void*> data;
213 class StaticMetadata {
215 StaticMetadata(const grpc_slice& key, const grpc_slice& value, uintptr_t idx)
216 : kv_({key, value}), hash_(0), static_idx_(idx) {}
218 const grpc_mdelem_data& data() const { return kv_; }
221 uint32_t hash() { return hash_; }
222 uintptr_t StaticIndex() { return static_idx_; }
225 grpc_mdelem_data kv_;
227 /* private only data */
229 uintptr_t static_idx_;
// Base class for refcounted metadata elements: holds the key/value slices,
// an atomic refcount, and a hash.
// NOTE(review): extraction residue in this region - each line carries a stray
// original-line number, and several lines (access specifiers, method-closing
// braces, the key_/value_/hash_ members, NDEBUG guards, the class terminator)
// are missing from this copy. Code left byte-identical; comments only.
232 class RefcountedMdBase {
// Constructors: refcount starts at 1; the second overload also seeds hash_.
234 RefcountedMdBase(const grpc_slice& key, const grpc_slice& value)
235 : key_(key), value_(value), refcnt_(1) {}
236 RefcountedMdBase(const grpc_slice& key, const grpc_slice& value,
238 : key_(key), value_(value), refcnt_(1), hash_(hash) {}
// Accessors.
240 const grpc_slice& key() const { return key_; }
241 const grpc_slice& value() const { return value_; }
242 uint32_t hash() { return hash_; }
// Debug-tracking Ref/Unref: log via the trace helpers, then adjust the
// refcount. (Upstream guards these file/line overloads with #ifndef NDEBUG -
// the guard lines are missing here; confirm.)
245 void Ref(const char* file, int line) {
246 grpc_mdelem_trace_ref(this, key_, value_, RefValue(), file, line);
// Relaxed order is sufficient for an increment: the caller already holds a
// ref, asserted just below.
247 const intptr_t prior = refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
248 GPR_ASSERT(prior > 0);
250 bool Unref(const char* file, int line) {
251 grpc_mdelem_trace_unref(this, key_, value_, RefValue(), file, line);
256 /* we can assume the ref count is >= 1 as the application is calling
257 this function - meaning that no adjustment to mdtab_free is necessary,
258 simplifying the logic here to be just an atomic increment */
259 refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
// ACQ_REL on the decrement; the ACQUIRE load in AllRefsDropped() below
// observes it.
262 const intptr_t prior = refcnt_.FetchSub(1, MemoryOrder::ACQ_REL);
263 GPR_DEBUG_ASSERT(prior > 0);
269 void TraceAtStart(const char* tag);
// Protected helpers over the atomic refcount.
272 intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
273 bool AllRefsDropped() { return refcnt_.Load(MemoryOrder::ACQUIRE) == 0; }
274 bool FirstRef() { return refcnt_.FetchAdd(1, MemoryOrder::RELAXED) == 0; }
277 /* must be byte compatible with grpc_mdelem_data */
280 grpc_core::Atomic<intptr_t> refcnt_;
// Refcounted metadata element stored in the interning hash table.
// NOTE(review): extraction residue - stray original-line numbers on each line;
// access specifiers, the BucketLink struct header, the NoRefKey declaration,
// data members (user_data_, link_) and the class terminator are missing from
// this copy. Code left byte-identical; comments only.
284 class InternedMetadata : public RefcountedMdBase {
286 // TODO(arjunroy): Change to use strongly typed slices instead.
// Intrusive link chaining elements that share a hash-table bucket.
289 explicit BucketLink(InternedMetadata* md) : next(md) {}
291 InternedMetadata* next = nullptr;
// Constructors; the const NoRefKey* overload is presumably a tag type used to
// skip taking a ref on the key (name-based inference - confirm in metadata.cc).
293 InternedMetadata(const grpc_slice& key, const grpc_slice& value,
294 uint32_t hash, InternedMetadata* next);
295 InternedMetadata(const grpc_slice& key, const grpc_slice& value,
296 uint32_t hash, InternedMetadata* next, const NoRefKey*);
// Ref variant taking the owning table shard; presumably called with the shard
// lock held (name-based - confirm in metadata.cc).
299 void RefWithShardLocked(mdtab_shard* shard);
300 UserData* user_data() { return &user_data_; }
// Bucket-chain traversal/maintenance via the link_ member.
301 InternedMetadata* bucket_next() { return link_.next; }
302 void set_bucket_next(InternedMetadata* md) { link_.next = md; }
// Walks and frees a bucket chain; returns a size_t (presumably the number of
// elements cleaned - confirm in metadata.cc).
304 static size_t CleanupLinkedMetadata(BucketLink* head);
311 /* Shadow structure for grpc_mdelem_data for allocated elements */
// NOTE(review): extraction residue - stray original-line numbers on each line;
// access specifiers, one constructor's trailing parameter line, data members
// and the class terminator are missing from this copy. Code left
// byte-identical; comments only.
312 class AllocatedMetadata : public RefcountedMdBase {
314 // TODO(arjunroy): Change to use strongly typed slices instead.
// Constructor overloads for the visible slice-ownership combinations.
316 AllocatedMetadata(const grpc_slice& key, const grpc_slice& value);
317 AllocatedMetadata(const grpc_core::ManagedMemorySlice& key,
318 const grpc_core::UnmanagedMemorySlice& value);
319 AllocatedMetadata(const grpc_core::ExternallyManagedSlice& key,
320 const grpc_core::UnmanagedMemorySlice& value);
// This overload's remaining parameter(s) were lost in this copy.
321 AllocatedMetadata(const grpc_slice& key, const grpc_slice& value,
323 ~AllocatedMetadata();
325 UserData* user_data() { return &user_data_; }
331 } // namespace grpc_core
// Take a ref on an mdelem and return it. Debug builds route through a
// file/line-tracking overload of grpc_mdelem_ref.
// NOTE(review): extraction residue - the opening #ifndef NDEBUG for this
// macro/function pair, the no-op returns for the no-refcount cases, the
// Ref() calls and closing braces are missing from this copy; each line also
// carries a stray original-line number. Code left byte-identical; comments
// only.
334 #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
335 inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd, const char* file,
337 #else  // ifndef NDEBUG
338 #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
339 inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd) {
340 #endif  // ifndef NDEBUG
// Dispatch on the storage class encoded in the payload's two low bits.
341 switch (GRPC_MDELEM_STORAGE(gmd)) {
// External and static storage have no refcount: ref is a no-op.
342 case GRPC_MDELEM_STORAGE_EXTERNAL:
343 case GRPC_MDELEM_STORAGE_STATIC:
345 case GRPC_MDELEM_STORAGE_INTERNED: {
// Recover the typed object pointer (tag bits masked off by the macro).
347 reinterpret_cast<grpc_core::InternedMetadata*> GRPC_MDELEM_DATA(gmd);
348 /* use C assert to have this removed in opt builds */
356 case GRPC_MDELEM_STORAGE_ALLOCATED: {
358 reinterpret_cast<grpc_core::AllocatedMetadata*> GRPC_MDELEM_DATA(gmd);
// Drop a ref on an mdelem; the final unref hands the element to
// grpc_mdelem_on_final_unref for reclamation.
// NOTE(review): extraction residue - the #ifndef NDEBUG opener, the switch
// opener, returns for the no-op cases and several closing braces are missing
// from this copy; each line also carries a stray original-line number. Code
// left byte-identical; comments only.
371 #define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__)
372 void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
373 uint32_t hash, const char* file, int line);
374 inline void grpc_mdelem_unref(grpc_mdelem gmd, const char* file, int line) {
// Release (non-debug) variants of the macro and declarations.
376 #define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s))
377 void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
379 inline void grpc_mdelem_unref(grpc_mdelem gmd) {
381 const grpc_mdelem_data_storage storage = GRPC_MDELEM_STORAGE(gmd);
// No-refcount storage classes: unref is a no-op.
383 case GRPC_MDELEM_STORAGE_EXTERNAL:
384 case GRPC_MDELEM_STORAGE_STATIC:
// Refcounted storage classes share the RefcountedMdBase path.
386 case GRPC_MDELEM_STORAGE_INTERNED:
387 case GRPC_MDELEM_STORAGE_ALLOCATED:
389 reinterpret_cast<grpc_core::RefcountedMdBase*> GRPC_MDELEM_DATA(gmd);
390 /* once the refcount hits zero, some other thread can come along and
391 free an interned md at any time: it's unsafe from this point on to
392 access it so we read the hash now. */
393 uint32_t hash = md->hash();
// Debug vs release unref paths (the NDEBUG guard lines were lost in this
// copy).
395 if (GPR_UNLIKELY(md->Unref(file, line))) {
396 grpc_mdelem_on_final_unref(storage, md, hash, file, line);
398 if (GPR_UNLIKELY(md->Unref())) {
399 grpc_mdelem_on_final_unref(storage, md, hash);
/* The canonical "null" mdelem: a NULL data pointer with external storage. */
#define GRPC_MDNULL GRPC_MAKE_MDELEM(NULL, GRPC_MDELEM_STORAGE_EXTERNAL)

/* We add 32 bytes of padding as per RFC-7540 section 6.5.2. */
#define GRPC_MDELEM_LENGTH(e)                                                  \
  (GRPC_SLICE_LENGTH(GRPC_MDKEY((e))) + GRPC_SLICE_LENGTH(GRPC_MDVALUE((e))) + \
   32)

/* Combine key and value hashes into a single element hash. */
#define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))

/* Process-wide init/shutdown of the metadata subsystem. */
void grpc_mdctx_global_init(void);
void grpc_mdctx_global_shutdown();
418 /* Like grpc_mdelem_from_slices, but we know that key is a static or interned
419 slice and value is not static or interned. This gives us an inlinable
420 fastpath - we know we must allocate metadata now, and that we do not need to
421 unref the value (rather, we just transfer the ref). We can avoid a ref since:
422 1) the key slice is passed in already ref'd
423 2) We're guaranteed to create a new Allocated slice, thus meaning the
424 ref can be considered 'transferred'.*/
425 inline grpc_mdelem grpc_mdelem_from_slices(
426 const grpc_core::ManagedMemorySlice& key,
427 const grpc_core::UnmanagedMemorySlice& value) {
428 using grpc_core::AllocatedMetadata;
429 return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
430 GRPC_MDELEM_STORAGE_ALLOCATED);
433 inline grpc_mdelem grpc_mdelem_from_slices(
434 const grpc_core::ExternallyManagedSlice& key,
435 const grpc_core::UnmanagedMemorySlice& value) {
436 using grpc_core::AllocatedMetadata;
437 return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
438 GRPC_MDELEM_STORAGE_ALLOCATED);
441 inline grpc_mdelem grpc_mdelem_from_slices(
442 const grpc_core::StaticMetadataSlice& key,
443 const grpc_core::UnmanagedMemorySlice& value) {
444 using grpc_core::AllocatedMetadata;
445 return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
446 GRPC_MDELEM_STORAGE_ALLOCATED);
449 #endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_H */