/*
 *  Message Processing Stack, Reader implementation
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#include "common.h"

#if defined(MBEDTLS_SSL_PROTO_TLS1_3)

#include "mps_reader.h"
#include "mps_common.h"
#include "mps_trace.h"

#include <string.h>

#if defined(MBEDTLS_MPS_ENABLE_TRACE)
static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
#endif /* MBEDTLS_MPS_ENABLE_TRACE */

/*
 * GENERAL NOTE ON CODING STYLE
 *
 * The following code intentionally separates memory loads
 * and stores from other operations (arithmetic or branches).
 * This leads to the introduction of many local variables
 * and significantly increases the C-code line count, but
 * should not increase the size of generated assembly.
 *
 * The reason for this is twofold:
 * (1) It eases verification efforts using the VST
 *     (Verified Software Toolchain),
 *     whose program logic cannot directly reason
 *     about instructions containing a load or store in
 *     addition to other operations (e.g. *p = *q or
 *     tmp = *p + 42).
 * (2) Operating on local variables and writing the results
 *     back to the target contexts on success only
 *     makes it possible to maintain structure invariants even
 *     on failure - this in turn has two benefits:
 *     (2.a) If for some reason an error code is not caught
 *           and operation continues, functions are nonetheless
 *           called with sane contexts, reducing the risk
 *           of dangerous behavior.
 *     (2.b) Randomized testing is easier if structures
 *           remain intact even in the face of failing
 *           and/or nonsensical calls.
 *     Moreover, it might even reduce code-size because
 *     the compiler need not write back temporary results
 *     to memory in case of failure.
 */
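
/* Illustrative sketch (not part of the library): the load/store discipline
 * described above, applied to a hypothetical two-field context. The type
 * example_ctx, its fields, and EXAMPLE_ERR_OUT_OF_BOUNDS are made up for
 * illustration. All fields are first read into locals, all arithmetic and
 * error checks happen on the locals, and the context is written back only
 * on the success path, so a failed call leaves the structure untouched.
 */
#if 0
static int example_advance(example_ctx *ctx, size_t step)
{
    /* Loads, separated from arithmetic */
    size_t pos = ctx->pos;
    size_t len = ctx->len;

    /* Arithmetic and checks on locals only */
    if (step > len - pos) {
        return EXAMPLE_ERR_OUT_OF_BOUNDS; /* ctx unchanged on failure */
    }
    pos += step;

    /* Stores on success only */
    ctx->pos = pos;
    return 0;
}
#endif /* example */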

static inline int mps_reader_is_accumulating(
    mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t acc_remaining;
    if (rd->acc == NULL) {
        return 0;
    }

    acc_remaining = rd->acc_share.acc_remaining;
    return acc_remaining > 0;
}

static inline int mps_reader_is_producing(
    mbedtls_mps_reader const *rd)
{
    unsigned char *frag = rd->frag;
    return frag == NULL;
}

static inline int mps_reader_is_consuming(
    mbedtls_mps_reader const *rd)
{
    return !mps_reader_is_producing(rd);
}

static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
    mbedtls_mps_reader const *rd)
{
    unsigned char *acc = rd->acc;
    mbedtls_mps_size_t frag_offset;

    if (acc == NULL) {
        return 0;
    }

    frag_offset = rd->acc_share.frag_offset;
    return frag_offset;
}

static inline mbedtls_mps_size_t mps_reader_serving_from_accumulator(
    mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t frag_offset, end;

    frag_offset = mps_reader_get_fragment_offset(rd);
    end = rd->end;

    return end < frag_offset;
}

static inline void mps_reader_zero(mbedtls_mps_reader *rd)
{
    /* A plain memset() would likely be more efficient,
     * but the current way of zeroing makes it harder
     * to overlook fields which should not be zero-initialized.
     * It's also more suitable for FV efforts since it
     * doesn't require reasoning about structs being
     * interpreted as unstructured binary blobs. */
    static mbedtls_mps_reader const zero =
    { .frag = NULL,
      .frag_len = 0,
      .commit = 0,
      .end = 0,
      .pending = 0,
      .acc = NULL,
      .acc_len = 0,
      .acc_available = 0,
      .acc_share = { .acc_remaining = 0 } };
    *rd = zero;
}

int mbedtls_mps_reader_init(mbedtls_mps_reader *rd,
                            unsigned char *acc,
                            mbedtls_mps_size_t acc_len)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_init");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Accumulator size: %u bytes", (unsigned) acc_len);
    mps_reader_zero(rd);
    rd->acc = acc;
    rd->acc_len = acc_len;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

int mbedtls_mps_reader_free(mbedtls_mps_reader *rd)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_free");
    mps_reader_zero(rd);
    MBEDTLS_MPS_TRACE_RETURN(0);
}
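
/* Illustrative sketch (not part of the library): setting up a reader. The
 * accumulator is optional; passing NULL/0 yields a reader that can serve
 * requests from single fragments but cannot pause across fragment
 * boundaries. The buffer size here is arbitrary.
 */
#if 0
static void example_reader_setup(void)
{
    static unsigned char acc[128]; /* backup storage for paused requests */
    mbedtls_mps_reader rd;

    mbedtls_mps_reader_init(&rd, acc, sizeof(acc));
    /* ... feed fragments and consume data ... */
    mbedtls_mps_reader_free(&rd);
}
#endif /* example */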

int mbedtls_mps_reader_feed(mbedtls_mps_reader *rd,
                            unsigned char *new_frag,
                            mbedtls_mps_size_t new_frag_len)
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_feed");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Fragment length: %u bytes", (unsigned) new_frag_len);

    if (new_frag == NULL) {
        MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_INVALID_ARG);
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_producing(rd),
                                   "mbedtls_mps_reader_feed() requires reader to be in producing mode");

    if (mps_reader_is_accumulating(rd)) {
        unsigned char *acc = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;

        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;

        copy_to_acc = acc_remaining;
        if (copy_to_acc > new_frag_len) {
            copy_to_acc = new_frag_len;
        }

        /* Copy new contents to accumulator. */
        memcpy(acc, new_frag, copy_to_acc);

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Copy new data of size %u of %u into accumulator at offset %u",
                          (unsigned) copy_to_acc, (unsigned) new_frag_len,
                          (unsigned) acc_available);

        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if (acc_remaining > 0) {
            /* We need to accumulate more data. Stay in producing mode. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_MORE);
        }

        /* We have filled the accumulator: Move to consuming mode. */

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Enough data available to serve user request");

        /* Remember overlap of accumulator and fragment. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    } else { /* Not accumulating */
        rd->acc_share.frag_offset = 0;
    }

    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}
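
/* Illustrative sketch (not part of the library): the producing-mode feed
 * loop. While the reader is still accumulating data for a paused request,
 * mbedtls_mps_reader_feed() returns MBEDTLS_ERR_MPS_READER_NEED_MORE and
 * the caller keeps feeding fragments until the call returns 0. The data
 * source next_fragment() is made up for illustration.
 */
#if 0
int example_feed_until_ready(mbedtls_mps_reader *rd)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len;
    int ret;

    do {
        next_fragment(&frag, &frag_len); /* hypothetical data source */
        ret = mbedtls_mps_reader_feed(rd, frag, frag_len);
    } while (ret == MBEDTLS_ERR_MPS_READER_NEED_MORE);

    return ret; /* 0: reader is now in consuming mode */
}
#endif /* example */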

int mbedtls_mps_reader_get(mbedtls_mps_reader *rd,
                           mbedtls_mps_size_t desired,
                           unsigned char **buffer,
                           mbedtls_mps_size_t *buflen)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_get");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Bytes requested: %u", (unsigned) desired);

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(rd),
                                   "mbedtls_mps_reader_get() requires reader to be in consuming mode");

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset(rd);

    /* Check if we're still serving from the accumulator. */
    if (mps_reader_serving_from_accumulator(rd)) {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |           acc             |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |           acc             |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end        end+desired
         *               |             |
         *        +------v-------------v------+
         *        |           acc             |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset acc_available
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end                 end + desired
         *               |                        |
         *        +------v--------------------+   v
         *        |           acc             |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Serve the request from the accumulator");
        if (frag_offset - end < desired) {
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if (acc_available - end != desired) {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we wouldn't attempt exactly the same `get` calls
                 * when resuming a reader as we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
            }
        }

        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if (buflen != NULL) {
            *buflen = desired;
        }

        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN(0);
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Serve the request from the current fragment.");

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if (frag_remaining < desired) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There's not enough data in the current fragment "
                          "to serve the request.");
        /* There's not enough data in the current fragment,
         * so either just return what we have or fail. */
        if (buflen == NULL) {
            if (frag_remaining > 0) {
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                                  "Remember to collect %u bytes before re-opening",
                                  (unsigned) rd->pending);
            }
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_OUT_OF_DATA);
        }

        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if (buflen != NULL) {
        *buflen = desired;
    }

    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}
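
/* Illustrative sketch (not part of the library): consuming data. Each
 * successful mbedtls_mps_reader_get() hands out a buffer owned by the
 * reader, and mbedtls_mps_reader_commit() marks everything fetched so far
 * as processed, so it won't be backed up on a later pause. The 4-byte
 * header and process_header() are made up for illustration.
 */
#if 0
int example_consume(mbedtls_mps_reader *rd)
{
    unsigned char *hdr;
    int ret;

    /* Request exactly 4 bytes; buflen == NULL means "all or nothing". */
    ret = mbedtls_mps_reader_get(rd, 4, &hdr, NULL);
    if (ret == MBEDTLS_ERR_MPS_READER_OUT_OF_DATA) {
        return ret; /* pause via mbedtls_mps_reader_reclaim() and re-feed */
    }
    if (ret != 0) {
        return ret;
    }

    process_header(hdr); /* hypothetical user function */
    return mbedtls_mps_reader_commit(rd);
}
#endif /* example */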

int mbedtls_mps_reader_commit(mbedtls_mps_reader *rd)
{
    mbedtls_mps_size_t end;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_commit");
    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(rd),
                                   "mbedtls_mps_reader_commit() requires reader to be in consuming mode");

    end = rd->end;
    rd->commit = end;

    MBEDTLS_MPS_TRACE_RETURN(0);
}

int mbedtls_mps_reader_reclaim(mbedtls_mps_reader *rd,
                               int *paused)
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_reclaim");

    if (paused != NULL) {
        *paused = 0;
    }

    MBEDTLS_MPS_STATE_VALIDATE_RAW(mps_reader_is_consuming(rd),
                                   "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode");

    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset(rd);

    if (pending == 0) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "No unsatisfied read-request has been logged.");

        /* Check if there's data left to be consumed. */
        if (commit < frag_offset || commit - frag_offset < frag_len) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "There is data left to be consumed.");
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_DATA_LEFT);
        }

        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment has been fully processed and committed.");
    } else {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There has been an unsatisfied read with %u bytes overhead.",
                          (unsigned) pending);

        if (acc == NULL) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "No accumulator present");
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR);
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if (commit < frag_offset) {
            /* No, the accumulator is still being processed. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        } else {
            /* Yes, the accumulator has already been processed. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

        overflow = 0;
        overflow |= (backup_len < acc_backup_len);
        overflow |= (acc_len_needed < backup_len);

        if (overflow || acc_len < acc_len_needed) {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "The accumulator is too small to handle the backup.");
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Size: %u", (unsigned) acc_len);
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Needed: %u (%u + %u)",
                              (unsigned) acc_len_needed,
                              (unsigned) backup_len, (unsigned) pending);
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL);
        }

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment backup: %u", (unsigned) frag_backup_len);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Accumulator backup: %u", (unsigned) acc_backup_len);

        /* Move the uncommitted parts of the accumulator to its front. */
        memmove(acc, acc + acc_backup_offset, acc_backup_len);

        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy(acc + acc_backup_len,
               frag + frag_backup_offset, frag_backup_len);

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if (paused != NULL) {
            *paused = 1;
        }
    }

    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Final state: aa %u, al %u, ar %u",
                      (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                      (unsigned) rd->acc_share.acc_remaining);
    MBEDTLS_MPS_TRACE_RETURN(0);
}
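
/* Illustrative sketch (not part of the library): a full pause/resume cycle,
 * composing the earlier example_feed_until_ready() and example_consume()
 * sketches. When a get request could not be satisfied,
 * mbedtls_mps_reader_reclaim() backs up the uncommitted data plus the
 * outstanding request into the accumulator and reports paused == 1; the
 * caller then returns to the feed loop. This is a simplified model of how
 * an upper layer might drive the reader (e.g. the DATA_LEFT case is not
 * handled), not code taken from the library.
 */
#if 0
int example_read_cycle(mbedtls_mps_reader *rd)
{
    int ret, paused;

    for (;;) {
        ret = example_feed_until_ready(rd);   /* producing mode */
        if (ret != 0) {
            return ret;
        }

        ret = example_consume(rd);            /* consuming mode */
        if (ret != 0 && ret != MBEDTLS_ERR_MPS_READER_OUT_OF_DATA) {
            return ret;
        }

        ret = mbedtls_mps_reader_reclaim(rd, &paused);
        if (ret != 0) {
            return ret;
        }
        if (!paused) {
            return 0; /* all data consumed and committed */
        }
        /* paused: loop back and feed more fragments */
    }
}
#endif /* example */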

#endif /* MBEDTLS_SSL_PROTO_TLS1_3 */