blob: 90c1a78793f9341ebb42d3e07a39f5efffff172b [file] [log] [blame]
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001/*
2 * libjingle
3 * Copyright 2004--2005, Google Inc.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28// Copyright 2005 Google Inc. All Rights Reserved.
29//
30
31
32#ifdef WIN32
33#include "talk/base/win32.h"
34#else // !WIN32
35#define SEC_E_CERT_EXPIRED (-2146893016)
36#endif // !WIN32
37
38#include "talk/base/common.h"
39#include "talk/base/httpbase.h"
40#include "talk/base/logging.h"
41#include "talk/base/socket.h"
42#include "talk/base/stringutils.h"
43#include "talk/base/thread.h"
44
45namespace talk_base {
46
47//////////////////////////////////////////////////////////////////////
48// Helpers
49//////////////////////////////////////////////////////////////////////
50
51bool MatchHeader(const char* str, size_t len, HttpHeader header) {
52 const char* const header_str = ToString(header);
53 const size_t header_len = strlen(header_str);
54 return (len == header_len) && (_strnicmp(str, header_str, header_len) == 0);
55}
56
// Thread-message identifiers (see talk/base/thread.h).  MSG_READ is not
// referenced anywhere in this file; presumably used by a subclass or left
// behind by a refactor -- TODO confirm before removing.
enum {
  MSG_READ
};
60
61//////////////////////////////////////////////////////////////////////
62// HttpParser
63//////////////////////////////////////////////////////////////////////
64
// Initializes the parser to its idle state (ST_LEADER, no body expected).
HttpParser::HttpParser() {
  reset();
}
68
// Nothing to release; subclasses own any associated resources.
HttpParser::~HttpParser() {
}
71
// Returns the parser to its initial state so a new document can be parsed.
void
HttpParser::reset() {
  state_ = ST_LEADER;        // expect the request/status line first
  chunked_ = false;          // no Transfer-Encoding: chunked seen yet
  data_size_ = SIZE_UNKNOWN; // body length unknown until headers arrive
}
78
79HttpParser::ProcessResult
80HttpParser::Process(const char* buffer, size_t len, size_t* processed,
81 HttpError* error) {
82 *processed = 0;
83 *error = HE_NONE;
84
85 if (state_ >= ST_COMPLETE) {
86 ASSERT(false);
87 return PR_COMPLETE;
88 }
89
90 while (true) {
91 if (state_ < ST_DATA) {
92 size_t pos = *processed;
93 while ((pos < len) && (buffer[pos] != '\n')) {
94 pos += 1;
95 }
96 if (pos >= len) {
97 break; // don't have a full header
98 }
99 const char* line = buffer + *processed;
100 size_t len = (pos - *processed);
101 *processed = pos + 1;
102 while ((len > 0) && isspace(static_cast<unsigned char>(line[len-1]))) {
103 len -= 1;
104 }
105 ProcessResult result = ProcessLine(line, len, error);
106 LOG(LS_VERBOSE) << "Processed line, result=" << result;
107
108 if (PR_CONTINUE != result) {
109 return result;
110 }
111 } else if (data_size_ == 0) {
112 if (chunked_) {
113 state_ = ST_CHUNKTERM;
114 } else {
115 return PR_COMPLETE;
116 }
117 } else {
118 size_t available = len - *processed;
119 if (available <= 0) {
120 break; // no more data
121 }
122 if ((data_size_ != SIZE_UNKNOWN) && (available > data_size_)) {
123 available = data_size_;
124 }
125 size_t read = 0;
126 ProcessResult result = ProcessData(buffer + *processed, available, read,
127 error);
128 LOG(LS_VERBOSE) << "Processed data, result: " << result << " read: "
129 << read << " err: " << error;
130
131 if (PR_CONTINUE != result) {
132 return result;
133 }
134 *processed += read;
135 if (data_size_ != SIZE_UNKNOWN) {
136 data_size_ -= read;
137 }
138 }
139 }
140
141 return PR_CONTINUE;
142}
143
144HttpParser::ProcessResult
145HttpParser::ProcessLine(const char* line, size_t len, HttpError* error) {
146 LOG_F(LS_VERBOSE) << " state: " << state_ << " line: "
147 << std::string(line, len) << " len: " << len << " err: "
148 << error;
149
150 switch (state_) {
151 case ST_LEADER:
152 state_ = ST_HEADERS;
153 return ProcessLeader(line, len, error);
154
155 case ST_HEADERS:
156 if (len > 0) {
157 const char* value = strchrn(line, len, ':');
158 if (!value) {
159 *error = HE_PROTOCOL;
160 return PR_COMPLETE;
161 }
162 size_t nlen = (value - line);
163 const char* eol = line + len;
164 do {
165 value += 1;
166 } while ((value < eol) && isspace(static_cast<unsigned char>(*value)));
167 size_t vlen = eol - value;
168 if (MatchHeader(line, nlen, HH_CONTENT_LENGTH)) {
169 unsigned int temp_size;
170 if (sscanf(value, "%u", &temp_size) != 1) {
171 *error = HE_PROTOCOL;
172 return PR_COMPLETE;
173 }
174 data_size_ = static_cast<size_t>(temp_size);
175 } else if (MatchHeader(line, nlen, HH_TRANSFER_ENCODING)) {
176 if ((vlen == 7) && (_strnicmp(value, "chunked", 7) == 0)) {
177 chunked_ = true;
178 } else if ((vlen == 8) && (_strnicmp(value, "identity", 8) == 0)) {
179 chunked_ = false;
180 } else {
181 *error = HE_PROTOCOL;
182 return PR_COMPLETE;
183 }
184 }
185 return ProcessHeader(line, nlen, value, vlen, error);
186 } else {
187 state_ = chunked_ ? ST_CHUNKSIZE : ST_DATA;
188 return ProcessHeaderComplete(chunked_, data_size_, error);
189 }
190 break;
191
192 case ST_CHUNKSIZE:
193 if (len > 0) {
194 char* ptr = NULL;
195 data_size_ = strtoul(line, &ptr, 16);
196 if (ptr != line + len) {
197 *error = HE_PROTOCOL;
198 return PR_COMPLETE;
199 }
200 state_ = (data_size_ == 0) ? ST_TRAILERS : ST_DATA;
201 } else {
202 *error = HE_PROTOCOL;
203 return PR_COMPLETE;
204 }
205 break;
206
207 case ST_CHUNKTERM:
208 if (len > 0) {
209 *error = HE_PROTOCOL;
210 return PR_COMPLETE;
211 } else {
212 state_ = chunked_ ? ST_CHUNKSIZE : ST_DATA;
213 }
214 break;
215
216 case ST_TRAILERS:
217 if (len == 0) {
218 return PR_COMPLETE;
219 }
220 // *error = onHttpRecvTrailer();
221 break;
222
223 default:
224 ASSERT(false);
225 break;
226 }
227
228 return PR_CONTINUE;
229}
230
// Returns true if a clean stream close is a legal end-of-document: we are in
// the body, and the body length was unknown (no Content-Length, not chunked),
// so EOF is the only way the peer can signal completion.
bool
HttpParser::is_valid_end_of_input() const {
  return (state_ == ST_DATA) && (data_size_ == SIZE_UNKNOWN);
}
235
236void
237HttpParser::complete(HttpError error) {
238 if (state_ < ST_COMPLETE) {
239 state_ = ST_COMPLETE;
240 OnComplete(error);
241 }
242}
243
244//////////////////////////////////////////////////////////////////////
245// HttpBase::DocumentStream
246//////////////////////////////////////////////////////////////////////
247
// A fixed-capacity memory stream that reports SR_BLOCK instead of growing
// when a write would exceed the caller-supplied buffer.  Used by
// DocumentStream::Read to make DoReceiveLoop write directly into the
// reader's buffer and stall (rather than fail) when the buffer fills.
class BlockingMemoryStream : public ExternalMemoryStream {
public:
  BlockingMemoryStream(char* buffer, size_t size)
  : ExternalMemoryStream(buffer, size) { }

  virtual StreamResult DoReserve(size_t size, int* error) {
    // Never reallocate: succeed while the request fits, block otherwise.
    return (buffer_length_ >= size) ? SR_SUCCESS : SR_BLOCK;
  }
};
257
// Pull-style adapter exposing the body of an in-progress HTTP receive as a
// StreamInterface.  While connected (base_ != NULL) Read() pumps HttpBase's
// receive loop directly into the caller's buffer.  When the document
// completes, Disconnect() severs the link and latches the final error so
// later Reads return SR_EOS or SR_ERROR.
class HttpBase::DocumentStream : public StreamInterface {
public:
  DocumentStream(HttpBase* base) : base_(base), error_(HE_DEFAULT) { }

  // SS_OPEN only while a receive is actually in progress; SS_CLOSED after
  // Disconnect; SS_OPENING otherwise (e.g. still connecting/sending).
  virtual StreamState GetState() const {
    if (NULL == base_)
      return SS_CLOSED;
    if (HM_RECV == base_->mode_)
      return SS_OPEN;
    return SS_OPENING;
  }

  virtual StreamResult Read(void* buffer, size_t buffer_len,
                            size_t* read, int* error) {
    if (!base_) {
      // Already disconnected: report the latched completion status.
      if (error) *error = error_;
      return (HE_NONE == error_) ? SR_EOS : SR_ERROR;
    }

    if (HM_RECV != base_->mode_) {
      return SR_BLOCK;
    }

    // DoReceiveLoop writes http document data to the StreamInterface* document
    // member of HttpData. In this case, we want this data to be written
    // directly to our buffer. To accomplish this, we wrap our buffer with a
    // StreamInterface, and replace the existing document with our wrapper.
    // When the method returns, we restore the old document. Ideally, we would
    // pass our StreamInterface* to DoReceiveLoop, but due to the callbacks
    // of HttpParser, we would still need to store the pointer temporarily.
    scoped_ptr<StreamInterface>
      stream(new BlockingMemoryStream(reinterpret_cast<char*>(buffer),
                                      buffer_len));

    // Replace the existing document with our wrapped buffer.
    base_->data_->document.swap(stream);

    // Pump the I/O loop. DoReceiveLoop is guaranteed not to attempt to
    // complete the I/O process, which means that our wrapper is not in danger
    // of being deleted. To ensure this, DoReceiveLoop returns true when it
    // wants complete to be called. We make sure to uninstall our wrapper
    // before calling complete().
    HttpError http_error;
    bool complete = base_->DoReceiveLoop(&http_error);

    // Reinstall the original output document.
    base_->data_->document.swap(stream);

    // If we reach the end of the receive stream, we disconnect our stream
    // adapter from the HttpBase, and further calls to read will either return
    // EOS or ERROR, appropriately. Finally, we call complete().
    StreamResult result = SR_BLOCK;
    if (complete) {
      HttpBase* base = Disconnect(http_error);
      if (error) *error = error_;
      result = (HE_NONE == error_) ? SR_EOS : SR_ERROR;
      base->complete(http_error);
    }

    // Even if we are complete, if some data was read we must return SUCCESS.
    // Future Reads will return EOS or ERROR based on the error_ variable.
    size_t position;
    stream->GetPosition(&position);
    if (position > 0) {
      if (read) *read = position;
      result = SR_SUCCESS;
    }
    return result;
  }

  // The document stream is read-only.
  virtual StreamResult Write(const void* data, size_t data_len,
                             size_t* written, int* error) {
    if (error) *error = -1;
    return SR_ERROR;
  }

  virtual void Close() {
    if (base_) {
      HttpBase* base = Disconnect(HE_NONE);
      if (HM_RECV == base->mode_ && base->http_stream_) {
        // Read I/O could have been stalled on the user of this DocumentStream,
        // so restart the I/O process now that we've removed ourselves.
        base->http_stream_->PostEvent(SE_READ, 0);
      }
    }
  }

  // Reports remaining body size only when it is known (Content-Length or
  // current chunk); returns false for unknown-length bodies.
  virtual bool GetAvailable(size_t* size) const {
    if (!base_ || HM_RECV != base_->mode_)
      return false;
    size_t data_size = base_->GetDataRemaining();
    if (SIZE_UNKNOWN == data_size)
      return false;
    if (size)
      *size = data_size;
    return true;
  }

  // Severs the link with HttpBase in both directions and latches |error| as
  // the result for all future Reads.  Returns the (former) HttpBase so the
  // caller can finish notifying it.
  HttpBase* Disconnect(HttpError error) {
    ASSERT(NULL != base_);
    ASSERT(NULL != base_->doc_stream_);
    HttpBase* base = base_;
    base_->doc_stream_ = NULL;
    base_ = NULL;
    error_ = error;
    return base;
  }

private:
  HttpBase* base_;   // NULL once disconnected
  HttpError error_;  // final status reported after disconnect
};
370
371//////////////////////////////////////////////////////////////////////
372// HttpBase
373//////////////////////////////////////////////////////////////////////
374
// Starts with no stream attached, no transaction in progress, and no
// notification sink.
HttpBase::HttpBase() : mode_(HM_NONE), data_(NULL), notify_(NULL),
                       http_stream_(NULL), doc_stream_(NULL) {
}
378
// A transaction must not be in progress at destruction time; callers are
// expected to abort() or complete first.
HttpBase::~HttpBase() {
  ASSERT(HM_NONE == mode_);
}
382
383bool
384HttpBase::isConnected() const {
385 return (http_stream_ != NULL) && (http_stream_->GetState() == SS_OPEN);
386}
387
388bool
389HttpBase::attach(StreamInterface* stream) {
390 if ((mode_ != HM_NONE) || (http_stream_ != NULL) || (stream == NULL)) {
391 ASSERT(false);
392 return false;
393 }
394 http_stream_ = stream;
395 http_stream_->SignalEvent.connect(this, &HttpBase::OnHttpStreamEvent);
396 mode_ = (http_stream_->GetState() == SS_OPENING) ? HM_CONNECT : HM_NONE;
397 return true;
398}
399
400StreamInterface*
401HttpBase::detach() {
402 ASSERT(HM_NONE == mode_);
403 if (mode_ != HM_NONE) {
404 return NULL;
405 }
406 StreamInterface* stream = http_stream_;
407 http_stream_ = NULL;
408 if (stream) {
409 stream->SignalEvent.disconnect(this);
410 }
411 return stream;
412}
413
// Begins transmitting |data| (leader + headers + optional document body) on
// the attached stream.  No-op if a transaction is already in progress;
// reports HE_DISCONNECTED if the stream is not open.
void
HttpBase::send(HttpData* data) {
  ASSERT(HM_NONE == mode_);
  if (mode_ != HM_NONE) {
    return;
  } else if (!isConnected()) {
    OnHttpStreamEvent(http_stream_, SE_CLOSE, HE_DISCONNECTED);
    return;
  }

  mode_ = HM_SEND;
  data_ = data;
  len_ = 0;
  ignore_data_ = chunk_data_ = false;

  if (data_->document) {
    // Resume sending when more document data becomes readable.
    data_->document->SignalEvent.connect(this, &HttpBase::OnDocumentEvent);
  }

  // Honor an explicit "Transfer-Encoding: chunked" header by framing the
  // document body in chunks.
  std::string encoding;
  if (data_->hasHeader(HH_TRANSFER_ENCODING, &encoding)
      && (encoding == "chunked")) {
    chunk_data_ = true;
  }

  // Buffer the leader line (e.g. "GET / HTTP/1.1") followed by CRLF.
  len_ = data_->formatLeader(buffer_, sizeof(buffer_));
  len_ += strcpyn(buffer_ + len_, sizeof(buffer_) - len_, "\r\n");

  header_ = data_->begin();
  if (header_ == data_->end()) {
    // We must call this at least once, in the case where there are no headers.
    queue_headers();
  }

  flush_data();
}
450
// Begins receiving a document into |data|.  No-op if a transaction is
// already in progress; reports HE_DISCONNECTED if the stream is not open.
// If a DocumentStream adapter exists, receiving is driven by its Read()
// calls; otherwise we pump the stream ourselves.
void
HttpBase::recv(HttpData* data) {
  ASSERT(HM_NONE == mode_);
  if (mode_ != HM_NONE) {
    return;
  } else if (!isConnected()) {
    OnHttpStreamEvent(http_stream_, SE_CLOSE, HE_DISCONNECTED);
    return;
  }

  mode_ = HM_RECV;
  data_ = data;
  len_ = 0;
  ignore_data_ = chunk_data_ = false;

  reset();  // restart the HttpParser state machine
  if (doc_stream_) {
    // Wake the DocumentStream consumer; it will pull via Read().
    doc_stream_->SignalEvent(doc_stream_, SE_OPEN | SE_READ, 0);
  } else {
    read_and_process_data();
  }
}
473
474void
475HttpBase::abort(HttpError err) {
476 if (mode_ != HM_NONE) {
477 if (http_stream_ != NULL) {
478 http_stream_->Close();
479 }
480 do_complete(err);
481 }
482}
483
484StreamInterface* HttpBase::GetDocumentStream() {
485 if (doc_stream_)
486 return NULL;
487 doc_stream_ = new DocumentStream(this);
488 return doc_stream_;
489}
490
491HttpError HttpBase::HandleStreamClose(int error) {
492 if (http_stream_ != NULL) {
493 http_stream_->Close();
494 }
495 if (error == 0) {
496 if ((mode_ == HM_RECV) && is_valid_end_of_input()) {
497 return HE_NONE;
498 } else {
499 return HE_DISCONNECTED;
500 }
501 } else if (error == SOCKET_EACCES) {
502 return HE_AUTH;
503 } else if (error == SEC_E_CERT_EXPIRED) {
504 return HE_CERTIFICATE_EXPIRED;
505 }
506 LOG_F(LS_ERROR) << "(" << error << ")";
507 return (HM_CONNECT == mode_) ? HE_CONNECT_FAILED : HE_SOCKET_ERROR;
508}
509
// Pumps the receive path: reads from http_stream_ into buffer_ and feeds the
// HttpParser.  Returns true when the transaction is finished (caller must
// invoke complete(*error)); returns false when stalled on input or output,
// or when the read-count cap is hit.
bool HttpBase::DoReceiveLoop(HttpError* error) {
  ASSERT(HM_RECV == mode_);
  ASSERT(NULL != error);

  // Due to the latency between receiving read notifications from
  // pseudotcpchannel, we rely on repeated calls to read in order to achieve
  // ideal throughput. The number of reads is limited to prevent starving
  // the caller.

  size_t loop_count = 0;
  const size_t kMaxReadCount = 20;
  bool process_requires_more_data = false;
  do {
    // The most frequent use of this function is response to new data available
    // on http_stream_. Therefore, we optimize by attempting to read from the
    // network first (as opposed to processing existing data first).

    if (len_ < sizeof(buffer_)) {
      // Attempt to buffer more data.
      size_t read;
      int read_error;
      StreamResult read_result = http_stream_->Read(buffer_ + len_,
                                                    sizeof(buffer_) - len_,
                                                    &read, &read_error);
      switch (read_result) {
      case SR_SUCCESS:
        ASSERT(len_ + read <= sizeof(buffer_));
        len_ += read;
        break;
      case SR_BLOCK:
        if (process_requires_more_data) {
          // We can't make progress until more data is available.
          return false;
        }
        // Attempt to process the data already in our buffer.
        break;
      case SR_EOS:
        // Clean close, with no error. Fall through to HandleStreamClose.
        read_error = 0;
        // fall through
      case SR_ERROR:
        *error = HandleStreamClose(read_error);
        return true;
      }
    } else if (process_requires_more_data) {
      // We have too much unprocessed data in our buffer. This should only
      // occur when a single HTTP header is longer than the buffer size (32K).
      // Anything longer than that is almost certainly an error.
      *error = HE_OVERFLOW;
      return true;
    }

    // Process data in our buffer. Process is not guaranteed to process all
    // the buffered data. In particular, it will wait until a complete
    // protocol element (such as http header, or chunk size) is available,
    // before processing it in its entirety. Also, it is valid and sometimes
    // necessary to call Process with an empty buffer, since the state machine
    // may have interrupted state transitions to complete.
    size_t processed;
    ProcessResult process_result = Process(buffer_, len_, &processed,
                                           error);
    ASSERT(processed <= len_);
    len_ -= processed;
    // Compact the buffer: shift unprocessed bytes to the front.
    memmove(buffer_, buffer_ + processed, len_);
    switch (process_result) {
    case PR_CONTINUE:
      // We need more data to make progress.
      process_requires_more_data = true;
      break;
    case PR_BLOCK:
      // We're stalled on writing the processed data.
      return false;
    case PR_COMPLETE:
      // *error already contains the correct code.
      return true;
    }
  } while (++loop_count <= kMaxReadCount);

  LOG_F(LS_WARNING) << "danger of starvation";
  return false;
}
590
591void
592HttpBase::read_and_process_data() {
593 HttpError error;
594 if (DoReceiveLoop(&error)) {
595 complete(error);
596 }
597}
598
// Drives the send path: buffers headers, then document data (chunk-framed if
// chunk_data_), and writes the buffer to the network.  Loops until blocked,
// complete, or errored.  Exits only via return; the trailing ASSERT is
// unreachable.
void
HttpBase::flush_data() {
  ASSERT(HM_SEND == mode_);

  // When send_required is true, no more buffering can occur without a network
  // write.
  bool send_required = (len_ >= sizeof(buffer_));

  while (true) {
    ASSERT(len_ <= sizeof(buffer_));

    // HTTP is inherently sensitive to round trip latency, since a frequent use
    // case is for small requests and responses to be sent back and forth, and
    // the lack of pipelining forces a single request to take a minimum of the
    // round trip time. As a result, it is to our benefit to pack as much data
    // into each packet as possible. Thus, we defer network writes until we've
    // buffered as much data as possible.

    if (!send_required && (header_ != data_->end())) {
      // First, attempt to queue more header data.
      send_required = queue_headers();
    }

    if (!send_required && data_->document) {
      // Next, attempt to queue document data.

      const size_t kChunkDigits = 8;
      size_t offset, reserve;
      if (chunk_data_) {
        // Reserve characters at the start for X-byte hex value and \r\n
        offset = len_ + kChunkDigits + 2;
        // ... and 2 characters at the end for \r\n
        reserve = offset + 2;
      } else {
        offset = len_;
        reserve = offset;
      }

      if (reserve >= sizeof(buffer_)) {
        // Not enough room even for the chunk framing; flush first.
        send_required = true;
      } else {
        size_t read;
        int error;
        StreamResult result = data_->document->Read(buffer_ + offset,
                                                    sizeof(buffer_) - reserve,
                                                    &read, &error);
        if (result == SR_SUCCESS) {
          ASSERT(reserve + read <= sizeof(buffer_));
          if (chunk_data_) {
            // Prepend the chunk length in hex.
            // Note: sprintfn appends a null terminator, which is why we can't
            // combine it with the line terminator.
            sprintfn(buffer_ + len_, kChunkDigits + 1, "%.*x",
                     kChunkDigits, read);
            // Add line terminator to the chunk length.
            memcpy(buffer_ + len_ + kChunkDigits, "\r\n", 2);
            // Add line terminator to the end of the chunk.
            memcpy(buffer_ + offset + read, "\r\n", 2);
          }
          len_ = reserve + read;
        } else if (result == SR_BLOCK) {
          // Nothing to do but flush data to the network.
          send_required = true;
        } else if (result == SR_EOS) {
          if (chunk_data_) {
            // Append the empty chunk and empty trailers, then turn off
            // chunking.
            ASSERT(len_ + 5 <= sizeof(buffer_));
            memcpy(buffer_ + len_, "0\r\n\r\n", 5);
            len_ += 5;
            chunk_data_ = false;
          } else if (0 == len_) {
            // No more data to read, and no more data to write.
            do_complete();
            return;
          }
          // Although we are done reading data, there is still data which needs
          // to be flushed to the network.
          send_required = true;
        } else {
          LOG_F(LS_ERROR) << "Read error: " << error;
          do_complete(HE_STREAM);
          return;
        }
      }
    }

    if (0 == len_) {
      // No data currently available to send.
      if (!data_->document) {
        // If there is no source document, that means we're done.
        do_complete();
      }
      return;
    }

    size_t written;
    int error;
    StreamResult result = http_stream_->Write(buffer_, len_, &written, &error);
    if (result == SR_SUCCESS) {
      ASSERT(written <= len_);
      len_ -= written;
      // Compact: shift unwritten bytes to the front of the buffer.
      memmove(buffer_, buffer_ + written, len_);
      send_required = false;
    } else if (result == SR_BLOCK) {
      if (send_required) {
        // Nothing more we can do until network is writeable.
        return;
      }
      // Otherwise, loop again to buffer more data before the next write.
    } else {
      ASSERT(result == SR_ERROR);
      LOG_F(LS_ERROR) << "error";
      OnHttpStreamEvent(http_stream_, SE_CLOSE, error);
      return;
    }
  }

  ASSERT(false);  // unreachable: loop exits only via return
}
718
719bool
720HttpBase::queue_headers() {
721 ASSERT(HM_SEND == mode_);
722 while (header_ != data_->end()) {
723 size_t len = sprintfn(buffer_ + len_, sizeof(buffer_) - len_,
724 "%.*s: %.*s\r\n",
725 header_->first.size(), header_->first.data(),
726 header_->second.size(), header_->second.data());
727 if (len_ + len < sizeof(buffer_) - 3) {
728 len_ += len;
729 ++header_;
730 } else if (len_ == 0) {
731 LOG(WARNING) << "discarding header that is too long: " << header_->first;
732 ++header_;
733 } else {
734 // Not enough room for the next header, write to network first.
735 return true;
736 }
737 }
738 // End of headers
739 len_ += strcpyn(buffer_ + len_, sizeof(buffer_) - len_, "\r\n");
740 return false;
741}
742
// Finalizes the current transaction: resets mode/state, detaches the
// document signal, tears down any DocumentStream adapter, and notifies the
// observer.  Order matters: mode_ is cleared before callbacks so re-entrant
// calls see an idle object.
void
HttpBase::do_complete(HttpError err) {
  ASSERT(mode_ != HM_NONE);
  HttpMode mode = mode_;
  mode_ = HM_NONE;
  if (data_ && data_->document) {
    data_->document->SignalEvent.disconnect(this);
  }
  data_ = NULL;
  if ((HM_RECV == mode) && doc_stream_) {
    ASSERT(HE_NONE != err);  // We should have Disconnected doc_stream_ already.
    DocumentStream* ds = doc_stream_;
    ds->Disconnect(err);  // clears doc_stream_ via the adapter
    ds->SignalEvent(ds, SE_CLOSE, err);
  }
  if (notify_) {
    notify_->onHttpComplete(mode, err);
  }
}
762
763//
764// Stream Signals
765//
766
// Dispatches transport-stream events to the state machine: SE_OPEN finishes
// a connect, SE_WRITE resumes sending, SE_READ resumes receiving, and
// SE_CLOSE terminates whatever is in progress.
void
HttpBase::OnHttpStreamEvent(StreamInterface* stream, int events, int error) {
  ASSERT(stream == http_stream_);
  if ((events & SE_OPEN) && (mode_ == HM_CONNECT)) {
    // Connection established; report success to the observer.
    do_complete();
    return;
  }

  if ((events & SE_WRITE) && (mode_ == HM_SEND)) {
    flush_data();
    return;
  }

  if ((events & SE_READ) && (mode_ == HM_RECV)) {
    if (doc_stream_) {
      // Pull model: let the DocumentStream consumer drive the read.
      doc_stream_->SignalEvent(doc_stream_, SE_READ, 0);
    } else {
      read_and_process_data();
    }
    return;
  }

  if ((events & SE_CLOSE) == 0)
    return;

  // Stream closed: translate the socket error and finish the transaction.
  HttpError http_error = HandleStreamClose(error);
  if (mode_ == HM_RECV) {
    // complete() lets the parser decide whether EOF was a clean end.
    complete(http_error);
  } else if (mode_ != HM_NONE) {
    do_complete(http_error);
  } else if (notify_) {
    notify_->onHttpClosed(http_error);
  }
}
801
// Handles events from the user-supplied document stream: writable while
// receiving means we can deliver more body data; readable while sending
// means more body data is available to transmit; close is a fatal stream
// error for the transaction.
void
HttpBase::OnDocumentEvent(StreamInterface* stream, int events, int error) {
  ASSERT(stream == data_->document.get());
  if ((events & SE_WRITE) && (mode_ == HM_RECV)) {
    read_and_process_data();
    return;
  }

  if ((events & SE_READ) && (mode_ == HM_SEND)) {
    flush_data();
    return;
  }

  if (events & SE_CLOSE) {
    LOG_F(LS_ERROR) << "Read error: " << error;
    do_complete(HE_STREAM);
    return;
  }
}
821
822//
823// HttpParser Implementation
824//
825
826HttpParser::ProcessResult
827HttpBase::ProcessLeader(const char* line, size_t len, HttpError* error) {
828 *error = data_->parseLeader(line, len);
829 return (HE_NONE == *error) ? PR_CONTINUE : PR_COMPLETE;
830}
831
832HttpParser::ProcessResult
833HttpBase::ProcessHeader(const char* name, size_t nlen, const char* value,
834 size_t vlen, HttpError* error) {
835 std::string sname(name, nlen), svalue(value, vlen);
836 data_->addHeader(sname, svalue);
837 return PR_CONTINUE;
838}
839
// HttpParser callback invoked when all headers have arrived.  Gives the
// observer a chance to inspect/alter the expected body size and possibly
// install a DocumentStream; if it does, we return PR_BLOCK to switch to the
// pull-based I/O model.
HttpParser::ProcessResult
HttpBase::ProcessHeaderComplete(bool chunked, size_t& data_size,
                                HttpError* error) {
  // Remember the adapter pointer so we can detect installation below.
  StreamInterface* old_docstream = doc_stream_;
  if (notify_) {
    *error = notify_->onHttpHeaderComplete(chunked, data_size);
    // The request must not be aborted as a result of this callback.
    ASSERT(NULL != data_);
  }
  if ((HE_NONE == *error) && data_->document) {
    data_->document->SignalEvent.connect(this, &HttpBase::OnDocumentEvent);
  }
  if (HE_NONE != *error) {
    return PR_COMPLETE;
  }
  if (old_docstream != doc_stream_) {
    // Break out of Process loop, since our I/O model just changed.
    return PR_BLOCK;
  }
  return PR_CONTINUE;
}
861
862HttpParser::ProcessResult
863HttpBase::ProcessData(const char* data, size_t len, size_t& read,
864 HttpError* error) {
865 if (ignore_data_ || !data_->document) {
866 read = len;
867 return PR_CONTINUE;
868 }
869 int write_error = 0;
870 switch (data_->document->Write(data, len, &read, &write_error)) {
871 case SR_SUCCESS:
872 return PR_CONTINUE;
873 case SR_BLOCK:
874 return PR_BLOCK;
875 case SR_EOS:
876 LOG_F(LS_ERROR) << "Unexpected EOS";
877 *error = HE_STREAM;
878 return PR_COMPLETE;
879 case SR_ERROR:
880 default:
881 LOG_F(LS_ERROR) << "Write error: " << write_error;
882 *error = HE_STREAM;
883 return PR_COMPLETE;
884 }
885}
886
// HttpParser callback: the parser has reached its terminal state; finish the
// transaction.
void
HttpBase::OnComplete(HttpError err) {
  LOG_F(LS_VERBOSE);
  do_complete(err);
}
892
893} // namespace talk_base