3 * Copyright 2015 gRPC authors.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
19 #include <grpc/support/port_platform.h>
21 #include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
/* This is here for grpc_is_binary_header
 * TODO(murgatroid99): Remove this
 */
29 #include <grpc/grpc.h>
31 #include <grpc/support/alloc.h>
32 #include <grpc/support/log.h>
34 #include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
35 #include "src/core/ext/transport/chttp2/transport/hpack_table.h"
36 #include "src/core/ext/transport/chttp2/transport/varint.h"
37 #include "src/core/lib/debug/stats.h"
38 #include "src/core/lib/slice/slice_internal.h"
39 #include "src/core/lib/slice/slice_string_helpers.h"
40 #include "src/core/lib/surface/validate_metadata.h"
41 #include "src/core/lib/transport/metadata.h"
42 #include "src/core/lib/transport/static_metadata.h"
43 #include "src/core/lib/transport/timeout_encoding.h"
45 #define HASH_FRAGMENT_MASK (GRPC_CHTTP2_HPACKC_NUM_VALUES - 1)
46 #define HASH_FRAGMENT_1(x) ((x)&HASH_FRAGMENT_MASK)
47 #define HASH_FRAGMENT_2(x) \
48 (((x) >> GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS) & HASH_FRAGMENT_MASK)
49 #define HASH_FRAGMENT_3(x) \
50 (((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 2)) & HASH_FRAGMENT_MASK)
51 #define HASH_FRAGMENT_4(x) \
52 (((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 3)) & HASH_FRAGMENT_MASK)
/* if the probability of this item being seen again is < 1/x then don't add
   it to the hpack table */
56 #define ONE_ON_ADD_PROBABILITY (GRPC_CHTTP2_HPACKC_NUM_VALUES >> 1)
57 /* don't consider adding anything bigger than this to the hpack table */
58 #define MAX_DECODER_SPACE_USAGE 512
60 #define DATA_FRAME_HEADER_SIZE 9
62 static grpc_slice_refcount terminal_slice_refcount(
63 grpc_slice_refcount::Type::STATIC);
64 static const grpc_slice terminal_slice = {
65 &terminal_slice_refcount, /* refcount */
66 {{0, nullptr}} /* data.refcounted */
71 /* number of bytes in 'output' when we started the frame - used to calculate
73 size_t output_length_at_start_of_frame;
74 /* index (in output) of the header for the current frame */
76 /* have we seen a regular (non-colon-prefixed) header yet? */
77 uint8_t seen_regular_header;
78 /* output stream id */
80 grpc_slice_buffer* output;
81 grpc_transport_one_way_stats* stats;
82 /* maximum size of a frame */
83 size_t max_frame_size;
84 bool use_true_binary_metadata;
87 /* fills p (which is expected to be DATA_FRAME_HEADER_SIZE bytes long)
88 * with a data frame header */
89 static void fill_header(uint8_t* p, uint8_t type, uint32_t id, size_t len,
91 /* len is the current frame size (i.e. for the frame we're finishing).
93 1) We called ensure_space(), (i.e. add_tiny_header_data()) and adding
94 'need_bytes' to the frame would cause us to exceed st->max_frame_size.
95 2) We called add_header_data, and adding the slice would cause us to exceed
97 3) We're done encoding the header.
99 Thus, len is always <= st->max_frame_size.
100 st->max_frame_size is derived from GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
101 which has a max allowable value of 16777215 (see chttp_transport.cc).
102 Thus, the following assert can be a debug assert. */
103 GPR_DEBUG_ASSERT(len < 16777316);
104 *p++ = static_cast<uint8_t>(len >> 16);
105 *p++ = static_cast<uint8_t>(len >> 8);
106 *p++ = static_cast<uint8_t>(len);
109 *p++ = static_cast<uint8_t>(id >> 24);
110 *p++ = static_cast<uint8_t>(id >> 16);
111 *p++ = static_cast<uint8_t>(id >> 8);
112 *p++ = static_cast<uint8_t>(id);
115 static size_t current_frame_size(framer_state* st) {
116 const size_t frame_size =
117 st->output->length - st->output_length_at_start_of_frame;
118 GPR_DEBUG_ASSERT(frame_size <= st->max_frame_size);
122 /* finish a frame - fill in the previously reserved header */
123 static void finish_frame(framer_state* st, int is_header_boundary,
124 int is_last_in_stream) {
126 type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER
127 : GRPC_CHTTP2_FRAME_CONTINUATION;
129 GRPC_SLICE_START_PTR(st->output->slices[st->header_idx]), type,
130 st->stream_id, current_frame_size(st),
131 static_cast<uint8_t>(
132 (is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
133 (is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0)));
134 st->stats->framing_bytes += DATA_FRAME_HEADER_SIZE;
135 st->is_first_frame = 0;
138 /* begin a new frame: reserve off header space, remember how many bytes we'd
139 output before beginning */
140 static void begin_frame(framer_state* st) {
142 reserved.refcount = nullptr;
143 reserved.data.inlined.length = DATA_FRAME_HEADER_SIZE;
144 st->header_idx = grpc_slice_buffer_add_indexed(st->output, reserved);
145 st->output_length_at_start_of_frame = st->output->length;
148 /* make sure that the current frame is of the type desired, and has sufficient
149 space to add at least about_to_add bytes -- finishes the current frame if
151 static void ensure_space(framer_state* st, size_t need_bytes) {
152 if (GPR_LIKELY(current_frame_size(st) + need_bytes <= st->max_frame_size)) {
155 finish_frame(st, 0, 0);
159 /* increment a filter count, halve all counts if one element reaches max */
160 static void inc_filter(uint8_t idx, uint32_t* sum, uint8_t* elems) {
162 if (elems[idx] < 255) {
167 for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
174 static void add_header_data(framer_state* st, grpc_slice slice) {
175 size_t len = GRPC_SLICE_LENGTH(slice);
177 if (len == 0) return;
178 remaining = st->max_frame_size - current_frame_size(st);
179 if (len <= remaining) {
180 st->stats->header_bytes += len;
181 grpc_slice_buffer_add(st->output, slice);
183 st->stats->header_bytes += remaining;
184 grpc_slice_buffer_add(st->output, grpc_slice_split_head(&slice, remaining));
185 finish_frame(st, 0, 0);
187 add_header_data(st, slice);
191 static uint8_t* add_tiny_header_data(framer_state* st, size_t len) {
192 ensure_space(st, len);
193 st->stats->header_bytes += len;
194 return grpc_slice_buffer_tiny_add(st->output, len);
197 static void evict_entry(grpc_chttp2_hpack_compressor* c) {
198 c->tail_remote_index++;
199 GPR_ASSERT(c->tail_remote_index > 0);
200 GPR_ASSERT(c->table_size >=
201 c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
202 GPR_ASSERT(c->table_elems > 0);
203 c->table_size = static_cast<uint16_t>(
205 c->table_elem_size[c->tail_remote_index % c->cap_table_elems]);
209 // Reserve space in table for the new element, evict entries if needed.
210 // Return the new index of the element. Return 0 to indicate not adding to
212 static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor* c,
214 uint32_t new_index = c->tail_remote_index + c->table_elems + 1;
215 GPR_DEBUG_ASSERT(elem_size < 65536);
217 // TODO(arjunroy): Re-examine semantics
218 if (elem_size > c->max_table_size) {
219 while (c->table_size > 0) {
225 /* Reserve space for this element in the remote table: if this overflows
226 the current table, drop elements until it fits, matching the decompressor
228 while (c->table_size + elem_size > c->max_table_size) {
231 // TODO(arjunroy): Are we conflating size in bytes vs. membership?
232 GPR_ASSERT(c->table_elems < c->max_table_size);
233 c->table_elem_size[new_index % c->cap_table_elems] =
234 static_cast<uint16_t>(elem_size);
235 c->table_size = static_cast<uint16_t>(c->table_size + elem_size);
241 // Add a key to the dynamic table. Both key and value will be added to table at
243 static void add_key_with_index(grpc_chttp2_hpack_compressor* c,
244 grpc_mdelem elem, uint32_t new_index,
246 if (new_index == 0) {
250 /* Store the key into {entries,indices}_keys */
251 if (grpc_slice_static_interned_equal(
252 c->entries_keys[HASH_FRAGMENT_2(key_hash)], GRPC_MDKEY(elem))) {
253 c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
254 } else if (grpc_slice_static_interned_equal(
255 c->entries_keys[HASH_FRAGMENT_3(key_hash)],
257 c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
258 } else if (c->entries_keys[HASH_FRAGMENT_2(key_hash)].refcount ==
259 &terminal_slice_refcount) {
260 c->entries_keys[HASH_FRAGMENT_2(key_hash)] =
261 grpc_slice_ref_internal(GRPC_MDKEY(elem));
262 c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
263 } else if (c->entries_keys[HASH_FRAGMENT_3(key_hash)].refcount ==
264 &terminal_slice_refcount) {
265 c->entries_keys[HASH_FRAGMENT_3(key_hash)] =
266 grpc_slice_ref_internal(GRPC_MDKEY(elem));
267 c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
268 } else if (c->indices_keys[HASH_FRAGMENT_2(key_hash)] <
269 c->indices_keys[HASH_FRAGMENT_3(key_hash)]) {
270 grpc_slice_unref_internal(c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
271 c->entries_keys[HASH_FRAGMENT_2(key_hash)] =
272 grpc_slice_ref_internal(GRPC_MDKEY(elem));
273 c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
275 grpc_slice_unref_internal(c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
276 c->entries_keys[HASH_FRAGMENT_3(key_hash)] =
277 grpc_slice_ref_internal(GRPC_MDKEY(elem));
278 c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
282 /* add an element to the decoder table */
283 static void add_elem_with_index(grpc_chttp2_hpack_compressor* c,
284 grpc_mdelem elem, uint32_t new_index,
285 uint32_t elem_hash, uint32_t key_hash) {
286 if (new_index == 0) {
289 GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(elem));
291 /* Store this element into {entries,indices}_elem */
292 if (grpc_mdelem_both_interned_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)],
294 /* already there: update with new index */
295 c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
296 } else if (grpc_mdelem_both_interned_eq(
297 c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem)) {
298 /* already there (cuckoo): update with new index */
299 c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
300 } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_2(elem_hash)])) {
301 /* not there, but a free element: add */
302 c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
303 c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
304 } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_3(elem_hash)])) {
305 /* not there (cuckoo), but a free element: add */
306 c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
307 c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
308 } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
309 c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
310 /* not there: replace oldest */
311 GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
312 c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
313 c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
315 /* not there: replace oldest */
316 GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
317 c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
318 c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
321 add_key_with_index(c, elem, new_index, key_hash);
324 static void add_elem(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
325 size_t elem_size, uint32_t elem_hash, uint32_t key_hash) {
326 uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
327 add_elem_with_index(c, elem, new_index, elem_hash, key_hash);
330 static void add_key(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
331 size_t elem_size, uint32_t key_hash) {
332 uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
333 add_key_with_index(c, elem, new_index, key_hash);
336 static void emit_indexed(grpc_chttp2_hpack_compressor* c, uint32_t elem_index,
338 GRPC_STATS_INC_HPACK_SEND_INDEXED();
339 uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
340 GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
345 wire_value(uint8_t huffman_prefix, bool insert_null_before_wire_value,
346 const grpc_slice& slice)
348 huffman_prefix(huffman_prefix),
349 insert_null_before_wire_value(insert_null_before_wire_value),
350 length(GRPC_SLICE_LENGTH(slice) +
351 (insert_null_before_wire_value ? 1 : 0)) {}
352 // While wire_value is const from the POV of hpack encoder code, actually
353 // adding it to a slice buffer will possibly split the slice.
354 const grpc_slice data;
355 const uint8_t huffman_prefix;
356 const bool insert_null_before_wire_value;
360 template <bool mdkey_definitely_interned>
361 static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
362 const bool is_bin_hdr =
363 mdkey_definitely_interned
364 ? grpc_is_refcounted_slice_binary_header(GRPC_MDKEY(elem))
365 : grpc_is_binary_header_internal(GRPC_MDKEY(elem));
366 const grpc_slice& value = GRPC_MDVALUE(elem);
368 if (true_binary_enabled) {
369 GRPC_STATS_INC_HPACK_SEND_BINARY();
370 return wire_value(0x00, true, grpc_slice_ref_internal(value));
372 GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64();
373 return wire_value(0x80, false,
374 grpc_chttp2_base64_encode_and_huffman_compress(value));
377 /* TODO(ctiller): opportunistically compress non-binary headers */
378 GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
379 return wire_value(0x00, false, grpc_slice_ref_internal(value));
383 static uint32_t wire_value_length(const wire_value& v) {
384 GPR_DEBUG_ASSERT(v.length <= UINT32_MAX);
385 return static_cast<uint32_t>(v.length);
/* Literal header emission with an indexed key: with or without incremental
   indexing (RFC 7541 s6.2.1 / s6.2.2). */
enum class EmitLitHdrType { INC_IDX, NO_IDX };

/* Literal header emission where the key is also sent literally ("_V"
   variants: new-name forms of the same representations). */
enum class EmitLitHdrVType { INC_IDX_V, NO_IDX_V };
394 template <EmitLitHdrType type>
395 static void emit_lithdr(grpc_chttp2_hpack_compressor* c, uint32_t key_index,
396 grpc_mdelem elem, framer_state* st) {
398 case EmitLitHdrType::INC_IDX:
399 GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX();
401 case EmitLitHdrType::NO_IDX:
402 GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX();
405 const uint32_t len_pfx = type == EmitLitHdrType::INC_IDX
406 ? GRPC_CHTTP2_VARINT_LENGTH(key_index, 2)
407 : GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
408 const wire_value value =
409 get_wire_value<true>(elem, st->use_true_binary_metadata);
410 const uint32_t len_val = wire_value_length(value);
411 const uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
412 GPR_DEBUG_ASSERT(len_pfx + len_val_len < GRPC_SLICE_INLINED_SIZE);
413 uint8_t* data = add_tiny_header_data(
415 len_pfx + len_val_len + (value.insert_null_before_wire_value ? 1 : 0));
417 case EmitLitHdrType::INC_IDX:
418 GRPC_CHTTP2_WRITE_VARINT(key_index, 2, 0x40, data, len_pfx);
420 case EmitLitHdrType::NO_IDX:
421 GRPC_CHTTP2_WRITE_VARINT(key_index, 4, 0x00, data, len_pfx);
424 GRPC_CHTTP2_WRITE_VARINT(len_val, 1, value.huffman_prefix, &data[len_pfx],
426 if (value.insert_null_before_wire_value) {
427 data[len_pfx + len_val_len] = 0;
429 add_header_data(st, value.data);
432 template <EmitLitHdrVType type>
433 static void emit_lithdr_v(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
436 case EmitLitHdrVType::INC_IDX_V:
437 GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V();
439 case EmitLitHdrVType::NO_IDX_V:
440 GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V();
443 GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
444 const uint32_t len_key =
445 static_cast<uint32_t>(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)));
446 const wire_value value =
447 type == EmitLitHdrVType::INC_IDX_V
448 ? get_wire_value<true>(elem, st->use_true_binary_metadata)
449 : get_wire_value<false>(elem, st->use_true_binary_metadata);
450 const uint32_t len_val = wire_value_length(value);
451 const uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
452 const uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
453 GPR_DEBUG_ASSERT(len_key <= UINT32_MAX);
454 GPR_DEBUG_ASSERT(1 + len_key_len < GRPC_SLICE_INLINED_SIZE);
455 uint8_t* key_buf = add_tiny_header_data(st, 1 + len_key_len);
456 key_buf[0] = type == EmitLitHdrVType::INC_IDX_V ? 0x40 : 0x00;
457 GRPC_CHTTP2_WRITE_VARINT(len_key, 1, 0x00, &key_buf[1], len_key_len);
458 add_header_data(st, grpc_slice_ref_internal(GRPC_MDKEY(elem)));
459 uint8_t* value_buf = add_tiny_header_data(
460 st, len_val_len + (value.insert_null_before_wire_value ? 1 : 0));
461 GRPC_CHTTP2_WRITE_VARINT(len_val, 1, value.huffman_prefix, value_buf,
463 if (value.insert_null_before_wire_value) {
464 value_buf[len_val_len] = 0;
466 add_header_data(st, value.data);
469 static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor* c,
471 uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(c->max_table_size, 3);
472 GRPC_CHTTP2_WRITE_VARINT(c->max_table_size, 3, 0x20,
473 add_tiny_header_data(st, len), len);
474 c->advertise_table_size_change = 0;
477 static void GPR_ATTRIBUTE_NOINLINE hpack_enc_log(grpc_mdelem elem) {
478 char* k = grpc_slice_to_c_string(GRPC_MDKEY(elem));
480 if (grpc_is_binary_header_internal(GRPC_MDKEY(elem))) {
481 v = grpc_dump_slice(GRPC_MDVALUE(elem), GPR_DUMP_HEX);
483 v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
487 "Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
488 k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
489 grpc_slice_is_interned(GRPC_MDKEY(elem)),
490 grpc_slice_is_interned(GRPC_MDVALUE(elem)));
495 static uint32_t dynidx(grpc_chttp2_hpack_compressor* c, uint32_t elem_index) {
496 return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
497 c->table_elems - elem_index;
/* Result of maybe_emit_indexed(): the elem's hash, whether an indexed
   representation was already emitted, and whether the elem is popular enough
   to be worth adding to the dynamic table. */
struct EmitIndexedStatus {
  EmitIndexedStatus() = default;
  EmitIndexedStatus(uint32_t elem_hash, bool emitted, bool can_add)
      : elem_hash(elem_hash), emitted(emitted), can_add(can_add) {}
  const uint32_t elem_hash = 0;
  const bool emitted = false;
  const bool can_add = false;
};
509 static EmitIndexedStatus maybe_emit_indexed(grpc_chttp2_hpack_compressor* c,
512 const uint32_t elem_hash =
513 GRPC_MDELEM_STORAGE(elem) == GRPC_MDELEM_STORAGE_INTERNED
514 ? reinterpret_cast<grpc_core::InternedMetadata*>(
515 GRPC_MDELEM_DATA(elem))
517 : reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(elem))
520 inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
522 /* is this elem currently in the decoders table? */
523 if (grpc_mdelem_both_interned_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)],
525 c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
526 /* HIT: complete element (first cuckoo hash) */
527 emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
529 return EmitIndexedStatus(elem_hash, true, false);
531 if (grpc_mdelem_both_interned_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)],
533 c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
534 /* HIT: complete element (second cuckoo hash) */
535 emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
537 return EmitIndexedStatus(elem_hash, true, false);
540 const bool can_add = c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
541 c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
542 return EmitIndexedStatus(elem_hash, false, can_add);
545 static void emit_maybe_add(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
546 framer_state* st, uint32_t indices_key,
547 bool should_add_elem, size_t decoder_space_usage,
548 uint32_t elem_hash, uint32_t key_hash) {
549 if (should_add_elem) {
550 emit_lithdr<EmitLitHdrType::INC_IDX>(c, dynidx(c, indices_key), elem, st);
551 add_elem(c, elem, decoder_space_usage, elem_hash, key_hash);
553 emit_lithdr<EmitLitHdrType::NO_IDX>(c, dynidx(c, indices_key), elem, st);
557 /* encode an mdelem */
558 static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
560 /* User-provided key len validated in grpc_validate_header_key_is_legal(). */
561 GPR_DEBUG_ASSERT(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)) > 0);
562 /* Header ordering: all reserved headers (prefixed with ':') must precede
563 * regular headers. This can be a debug assert, since:
564 * 1) User cannot give us ':' headers (grpc_validate_header_key_is_legal()).
565 * 2) grpc filters/core should be checked during debug builds. */
567 if (GRPC_SLICE_START_PTR(GRPC_MDKEY(elem))[0] != ':') { /* regular header */
568 st->seen_regular_header = 1;
571 st->seen_regular_header == 0 &&
572 "Reserved header (colon-prefixed) happening after regular ones.");
575 if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
579 const bool elem_interned = GRPC_MDELEM_IS_INTERNED(elem);
580 const bool key_interned =
581 elem_interned || grpc_slice_is_interned(GRPC_MDKEY(elem));
583 /* Key is not interned, emit literals. */
585 emit_lithdr_v<EmitLitHdrVType::NO_IDX_V>(c, elem, st);
588 /* Interned metadata => maybe already indexed. */
589 const EmitIndexedStatus ret =
590 elem_interned ? maybe_emit_indexed(c, elem, st) : EmitIndexedStatus();
595 /* should this elem be in the table? */
596 const size_t decoder_space_usage =
597 grpc_chttp2_get_size_in_hpack_table(elem, st->use_true_binary_metadata);
598 const bool decoder_space_available =
599 decoder_space_usage < MAX_DECODER_SPACE_USAGE;
600 const bool should_add_elem =
601 elem_interned && decoder_space_available && ret.can_add;
602 const uint32_t elem_hash = ret.elem_hash;
604 /* no hits for the elem... maybe there's a key? */
605 const uint32_t key_hash = GRPC_MDKEY(elem).refcount->Hash(GRPC_MDKEY(elem));
606 uint32_t indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)];
607 if (grpc_slice_static_interned_equal(
608 c->entries_keys[HASH_FRAGMENT_2(key_hash)], GRPC_MDKEY(elem)) &&
609 indices_key > c->tail_remote_index) {
610 /* HIT: key (first cuckoo hash) */
611 emit_maybe_add(c, elem, st, indices_key, should_add_elem,
612 decoder_space_usage, elem_hash, key_hash);
616 indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
617 if (grpc_slice_static_interned_equal(
618 c->entries_keys[HASH_FRAGMENT_3(key_hash)], GRPC_MDKEY(elem)) &&
619 indices_key > c->tail_remote_index) {
620 /* HIT: key (second cuckoo hash) */
621 emit_maybe_add(c, elem, st, indices_key, should_add_elem,
622 decoder_space_usage, elem_hash, key_hash);
626 /* no elem, key in the table... fall back to literal emission */
627 const bool should_add_key = !elem_interned && decoder_space_available;
628 if (should_add_elem || should_add_key) {
629 emit_lithdr_v<EmitLitHdrVType::INC_IDX_V>(c, elem, st);
631 emit_lithdr_v<EmitLitHdrVType::NO_IDX_V>(c, elem, st);
633 if (should_add_elem) {
634 add_elem(c, elem, decoder_space_usage, elem_hash, key_hash);
635 } else if (should_add_key) {
636 add_key(c, elem, decoder_space_usage, key_hash);
640 #define STRLEN_LIT(x) (sizeof(x) - 1)
641 #define TIMEOUT_KEY "grpc-timeout"
643 static void deadline_enc(grpc_chttp2_hpack_compressor* c, grpc_millis deadline,
645 char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
647 grpc_http2_encode_timeout(deadline - grpc_core::ExecCtx::Get()->Now(),
649 mdelem = grpc_mdelem_from_slices(
650 GRPC_MDSTR_GRPC_TIMEOUT, grpc_core::UnmanagedMemorySlice(timeout_str));
651 hpack_enc(c, mdelem, st);
652 GRPC_MDELEM_UNREF(mdelem);
/* Number of table-element slots needed to account for 'bytes' bytes of hpack
   table space: ceil(bytes / 32), since the minimum entry overhead is 32. */
static uint32_t elems_for_bytes(uint32_t bytes) {
  const uint32_t rounded_up = bytes + 31;
  return rounded_up >> 5;  /* divide by 32 */
}
657 void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c) {
658 memset(c, 0, sizeof(*c));
659 c->max_table_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
660 c->cap_table_elems = elems_for_bytes(c->max_table_size);
661 c->max_table_elems = c->cap_table_elems;
662 c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
663 c->table_elem_size = static_cast<uint16_t*>(
664 gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems));
665 memset(c->table_elem_size, 0,
666 sizeof(*c->table_elem_size) * c->cap_table_elems);
667 for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
668 c->entries_keys[i] = terminal_slice;
672 void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c) {
674 for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
675 if (c->entries_keys[i].refcount != &terminal_slice_refcount) {
676 grpc_slice_unref_internal(c->entries_keys[i]);
678 GRPC_MDELEM_UNREF(c->entries_elems[i]);
680 gpr_free(c->table_elem_size);
683 void grpc_chttp2_hpack_compressor_set_max_usable_size(
684 grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) {
685 c->max_usable_size = max_table_size;
686 grpc_chttp2_hpack_compressor_set_max_table_size(
687 c, GPR_MIN(c->max_table_size, max_table_size));
690 static void rebuild_elems(grpc_chttp2_hpack_compressor* c, uint32_t new_cap) {
691 uint16_t* table_elem_size =
692 static_cast<uint16_t*>(gpr_malloc(sizeof(*table_elem_size) * new_cap));
695 memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);
696 GPR_ASSERT(c->table_elems <= new_cap);
698 for (i = 0; i < c->table_elems; i++) {
699 uint32_t ofs = c->tail_remote_index + i + 1;
700 table_elem_size[ofs % new_cap] =
701 c->table_elem_size[ofs % c->cap_table_elems];
704 c->cap_table_elems = new_cap;
705 gpr_free(c->table_elem_size);
706 c->table_elem_size = table_elem_size;
709 void grpc_chttp2_hpack_compressor_set_max_table_size(
710 grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) {
711 max_table_size = GPR_MIN(max_table_size, c->max_usable_size);
712 if (max_table_size == c->max_table_size) {
715 while (c->table_size > 0 && c->table_size > max_table_size) {
718 c->max_table_size = max_table_size;
719 c->max_table_elems = elems_for_bytes(max_table_size);
720 if (c->max_table_elems > c->cap_table_elems) {
721 rebuild_elems(c, GPR_MAX(c->max_table_elems, 2 * c->cap_table_elems));
722 } else if (c->max_table_elems < c->cap_table_elems / 3) {
723 uint32_t new_cap = GPR_MAX(c->max_table_elems, 16);
724 if (new_cap != c->cap_table_elems) {
725 rebuild_elems(c, new_cap);
728 c->advertise_table_size_change = 1;
729 if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
730 gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
734 void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c,
735 grpc_mdelem** extra_headers,
736 size_t extra_headers_size,
737 grpc_metadata_batch* metadata,
738 const grpc_encode_header_options* options,
739 grpc_slice_buffer* outbuf) {
740 /* grpc_chttp2_encode_header is called by FlushInitial/TrailingMetadata in
741 writing.cc. Specifically, on streams returned by NextStream(), which
742 returns streams from the list GRPC_CHTTP2_LIST_WRITABLE. The only way to be
743 added to the list is via grpc_chttp2_list_add_writable_stream(), which
744 validates that stream_id is not 0. So, this can be a debug assert. */
745 GPR_DEBUG_ASSERT(options->stream_id != 0);
747 st.seen_regular_header = 0;
748 st.stream_id = options->stream_id;
750 st.is_first_frame = 1;
751 st.stats = options->stats;
752 st.max_frame_size = options->max_frame_size;
753 st.use_true_binary_metadata = options->use_true_binary_metadata;
755 /* Encode a metadata batch; store the returned values, representing
756 a metadata element that needs to be unreffed back into the metadata
757 slot. THIS MAY NOT BE THE SAME ELEMENT (if a decoder table slot got
758 updated). After this loop, we'll do a batch unref of elements. */
760 if (c->advertise_table_size_change != 0) {
761 emit_advertise_table_size_change(c, &st);
763 for (size_t i = 0; i < extra_headers_size; ++i) {
764 grpc_mdelem md = *extra_headers[i];
765 const bool is_static =
766 GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_STATIC;
767 uintptr_t static_index;
770 reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(md))
771 ->StaticIndex()) < GRPC_CHTTP2_LAST_STATIC_ENTRY) {
772 emit_indexed(c, static_cast<uint32_t>(static_index + 1), &st);
774 hpack_enc(c, md, &st);
777 grpc_metadata_batch_assert_ok(metadata);
778 for (grpc_linked_mdelem* l = metadata->list.head; l; l = l->next) {
779 const bool is_static =
780 GRPC_MDELEM_STORAGE(l->md) == GRPC_MDELEM_STORAGE_STATIC;
781 uintptr_t static_index;
783 (static_index = reinterpret_cast<grpc_core::StaticMetadata*>(
784 GRPC_MDELEM_DATA(l->md))
785 ->StaticIndex()) < GRPC_CHTTP2_LAST_STATIC_ENTRY) {
786 emit_indexed(c, static_cast<uint32_t>(static_index + 1), &st);
788 hpack_enc(c, l->md, &st);
791 grpc_millis deadline = metadata->deadline;
792 if (deadline != GRPC_MILLIS_INF_FUTURE) {
793 deadline_enc(c, deadline, &st);
796 finish_frame(&st, 1, options->is_eof);