/* Copyright (c) 2015, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */

#define BORINGSSL_INTERNAL_CXX_TYPES

#include <openssl/ssl.h>

#include <assert.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/mem.h>

#include "../crypto/internal.h"
#include "internal.h"


namespace bssl {

/* BIO uses int instead of size_t. No lengths will exceed uint16_t, so this
 * will not overflow. */
static_assert(0xffff <= INT_MAX, "uint16_t does not fit in int");

static_assert((SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) == 0,
              "SSL3_ALIGN_PAYLOAD must be a power of 2");

/* ensure_buffer ensures |buf| has capacity at least |cap|, aligned such that
 * data written after |header_len| is aligned to a |SSL3_ALIGN_PAYLOAD|-byte
 * boundary. It returns one on success and zero on error. */
static int ensure_buffer(SSL3_BUFFER *buf, size_t header_len, size_t cap) {
  if (cap > 0xffff) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  if (buf->cap >= cap) {
    return 1;
  }

  /* Add up to |SSL3_ALIGN_PAYLOAD| - 1 bytes of slack for alignment. */
  uint8_t *new_buf = (uint8_t *)OPENSSL_malloc(cap + SSL3_ALIGN_PAYLOAD - 1);
  if (new_buf == NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE);
    return 0;
  }

  /* Offset the buffer such that the record body is aligned. */
  size_t new_offset =
      (0 - header_len - (uintptr_t)new_buf) & (SSL3_ALIGN_PAYLOAD - 1);
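  /* With this offset, |new_buf + new_offset + header_len| is a multiple of
   * |SSL3_ALIGN_PAYLOAD|. For example, with 16-byte alignment, a 5-byte header
   * and |new_buf| at address 0x1003, |new_offset| is (0 - 5 - 0x1003) & 15 = 8
   * and the record body begins at 0x1003 + 8 + 5 = 0x1010. */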

  if (buf->buf != NULL) {
    OPENSSL_memcpy(new_buf + new_offset, buf->buf + buf->offset, buf->len);
    OPENSSL_free(buf->buf);
  }

  buf->buf = new_buf;
  buf->offset = new_offset;
  buf->cap = cap;
  return 1;
}

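/* consume_buffer marks the first |len| bytes of |buf| as consumed by advancing
 * |offset| and shrinking |len| and |cap|. It aborts if |len| exceeds
 * |buf->len|. */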
static void consume_buffer(SSL3_BUFFER *buf, size_t len) {
  if (len > buf->len) {
    abort();
  }
  buf->offset += (uint16_t)len;
  buf->len -= (uint16_t)len;
  buf->cap -= (uint16_t)len;
}

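/* clear_buffer releases the memory owned by |buf| and zeros it. */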
static void clear_buffer(SSL3_BUFFER *buf) {
  OPENSSL_free(buf->buf);
  OPENSSL_memset(buf, 0, sizeof(SSL3_BUFFER));
}

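/* ssl_read_buffer and ssl_read_buffer_len expose the unconsumed portion of the
 * read buffer; |offset| points at the first unconsumed byte. */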
uint8_t *ssl_read_buffer(SSL *ssl) {
  return ssl->s3->read_buffer.buf + ssl->s3->read_buffer.offset;
}

size_t ssl_read_buffer_len(const SSL *ssl) {
  return ssl->s3->read_buffer.len;
}

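/* dtls_read_buffer_next_packet reads a single packet from |ssl->rbio| into the
 * read buffer. It returns one on success and zero or a negative number on
 * error. */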
static int dtls_read_buffer_next_packet(SSL *ssl) {
  SSL3_BUFFER *buf = &ssl->s3->read_buffer;

  if (buf->len > 0) {
    /* It is an error to call |dtls_read_buffer_next_packet| when the read
     * buffer is not empty. */
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return -1;
  }

  /* Read a single packet from |ssl->rbio|. |buf->cap| must fit in an int. */
  int ret = BIO_read(ssl->rbio, buf->buf + buf->offset, (int)buf->cap);
  if (ret <= 0) {
    ssl->rwstate = SSL_READING;
    return ret;
  }
  /* |BIO_read| was bound by |buf->cap|, so this cannot overflow. */
  buf->len = (uint16_t)ret;
  return 1;
}

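/* tls_read_buffer_extend_to reads from |ssl->rbio| until the read buffer
 * contains at least |len| bytes. It returns one on success, -1 if |len|
 * exceeds the buffer's capacity, and zero or a negative number if the |BIO|
 * read fails. */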
static int tls_read_buffer_extend_to(SSL *ssl, size_t len) {
  SSL3_BUFFER *buf = &ssl->s3->read_buffer;

  if (len > buf->cap) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL);
    return -1;
  }

  /* Read until the target length is reached. */
  while (buf->len < len) {
    /* The amount of data to read is bounded by |buf->cap|, which must fit in
     * an int. */
    int ret = BIO_read(ssl->rbio, buf->buf + buf->offset + buf->len,
                       (int)(len - buf->len));
    if (ret <= 0) {
      ssl->rwstate = SSL_READING;
      return ret;
    }
    /* |BIO_read| was bound by |buf->cap - buf->len|, so this cannot
     * overflow. */
    buf->len += (uint16_t)ret;
  }

  return 1;
}

int ssl_read_buffer_extend_to(SSL *ssl, size_t len) {
  /* |ssl_read_buffer_extend_to| implicitly discards any consumed data. */
  ssl_read_buffer_discard(ssl);

  if (SSL_is_dtls(ssl)) {
    static_assert(
        DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH <= 0xffff,
        "DTLS read buffer is too large");

    /* The |len| parameter is ignored in DTLS. */
    len = DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH;
  }

  if (!ensure_buffer(&ssl->s3->read_buffer, ssl_record_prefix_len(ssl), len)) {
    return -1;
  }

  if (ssl->rbio == NULL) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET);
    return -1;
  }

  int ret;
  if (SSL_is_dtls(ssl)) {
    /* |len| is ignored for a datagram transport. */
    ret = dtls_read_buffer_next_packet(ssl);
  } else {
    ret = tls_read_buffer_extend_to(ssl, len);
  }

  if (ret <= 0) {
    /* If the buffer was empty originally and remained empty after attempting
     * to extend it, release the buffer until the next attempt. */
    ssl_read_buffer_discard(ssl);
  }
  return ret;
}

void ssl_read_buffer_consume(SSL *ssl, size_t len) {
  SSL3_BUFFER *buf = &ssl->s3->read_buffer;

  consume_buffer(buf, len);

  /* The TLS stack never reads beyond the current record, so there will never
   * be unconsumed data. If read-ahead is ever reimplemented,
   * |ssl_read_buffer_discard| will require a |memcpy| to shift the excess back
   * to the front of the buffer, to ensure there is enough space for the next
   * record. */
  assert(SSL_is_dtls(ssl) || len == 0 || buf->len == 0);
}

void ssl_read_buffer_discard(SSL *ssl) {
  if (ssl->s3->read_buffer.len == 0) {
    ssl_read_buffer_clear(ssl);
  }
}

void ssl_read_buffer_clear(SSL *ssl) {
  clear_buffer(&ssl->s3->read_buffer);
}


int ssl_write_buffer_is_pending(const SSL *ssl) {
  return ssl->s3->write_buffer.len > 0;
}

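/* Both limits below must fit in the uint16_t fields of |SSL3_BUFFER|. The TLS
 * bound counts two record headers and two sealing overheads, presumably to
 * leave room for a write that is split into two records. */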
static_assert(SSL3_RT_HEADER_LENGTH * 2 +
                      SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD * 2 +
                      SSL3_RT_MAX_PLAIN_LENGTH <=
                  0xffff,
              "maximum TLS write buffer is too large");

static_assert(DTLS1_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD +
                      SSL3_RT_MAX_PLAIN_LENGTH <=
                  0xffff,
              "maximum DTLS write buffer is too large");

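/* The write buffer is used in three steps: |ssl_write_buffer_init| reserves up
 * to |max_len| bytes and returns a pointer to fill, |ssl_write_buffer_set_len|
 * records how much was actually written, and |ssl_write_buffer_flush| writes
 * the pending data out through |ssl->wbio|. */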
int ssl_write_buffer_init(SSL *ssl, uint8_t **out_ptr, size_t max_len) {
  SSL3_BUFFER *buf = &ssl->s3->write_buffer;

  if (buf->buf != NULL) {
    OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR);
    return 0;
  }

  if (!ensure_buffer(buf, ssl_seal_align_prefix_len(ssl), max_len)) {
    return 0;
  }
  *out_ptr = buf->buf + buf->offset;
  return 1;
}

void ssl_write_buffer_set_len(SSL *ssl, size_t len) {
  SSL3_BUFFER *buf = &ssl->s3->write_buffer;

  if (len > buf->cap) {
    abort();
  }
  buf->len = len;
}

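/* tls_write_buffer_flush writes the pending buffer to |ssl->wbio|, consuming
 * bytes as partial writes succeed, and clears the buffer once everything has
 * been written. */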
static int tls_write_buffer_flush(SSL *ssl) {
  SSL3_BUFFER *buf = &ssl->s3->write_buffer;

  while (buf->len > 0) {
    int ret = BIO_write(ssl->wbio, buf->buf + buf->offset, buf->len);
    if (ret <= 0) {
      ssl->rwstate = SSL_WRITING;
      return ret;
    }
    consume_buffer(buf, (size_t)ret);
  }
  ssl_write_buffer_clear(ssl);
  return 1;
}

static int dtls_write_buffer_flush(SSL *ssl) {
  SSL3_BUFFER *buf = &ssl->s3->write_buffer;
  if (buf->len == 0) {
    return 1;
  }

  int ret = BIO_write(ssl->wbio, buf->buf + buf->offset, buf->len);
  if (ret <= 0) {
    ssl->rwstate = SSL_WRITING;
    /* If the write failed, drop the write buffer anyway. Datagram transports
     * can't write half a packet, so the caller is expected to retry from the
     * top. */
    ssl_write_buffer_clear(ssl);
    return ret;
  }
  ssl_write_buffer_clear(ssl);
  return 1;
}

int ssl_write_buffer_flush(SSL *ssl) {
  if (ssl->wbio == NULL) {
    OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET);
    return -1;
  }

  if (SSL_is_dtls(ssl)) {
    return dtls_write_buffer_flush(ssl);
  } else {
    return tls_write_buffer_flush(ssl);
  }
}

void ssl_write_buffer_clear(SSL *ssl) {
  clear_buffer(&ssl->s3->write_buffer);
}

}  // namespace bssl