Ticket #5192: 5192_3.patch

File 5192_3.patch, 30.8 KB (added by darfire, 3 years ago)
  • twisted/web/test/test_newclient.py

     
    77 
    88__metaclass__ = type 
    99 
     10from StringIO import StringIO 
     11 
    1012from zope.interface import implements 
    1113from zope.interface.verify import verifyObject 
    1214 
     
    1618from twisted.internet.error import ConnectionDone 
    1719from twisted.internet.defer import Deferred, succeed, fail 
    1820from twisted.internet.protocol import Protocol 
     21from twisted.internet.task import Clock, Cooperator 
    1922from twisted.trial.unittest import TestCase 
    2023from twisted.test.proto_helpers import StringTransport, AccumulatingProtocol 
    2124from twisted.web._newclient import UNKNOWN_LENGTH, STATUS, HEADER, BODY, DONE 
     
    3033from twisted.web.http_headers import Headers 
    3134from twisted.web.http import _DataLoss 
    3235from twisted.web.iweb import IBodyProducer, IResponse 
     36from twisted.web.client import FileBodyProducer 
    3337 
    3438 
    3539 
     
    793797    """ 
    794798    method = 'GET' 
    795799    stopped = False 
     800    headers = Headers() 
    796801 
    797802    def writeTo(self, transport): 
    798803        self.finished = Deferred() 
     
    811816    returns a succeeded L{Deferred}.  This vaguely emulates the behavior of a 
    812817    L{Request} with no body producer. 
    813818    """ 
     819    headers = Headers() 
    814820    def writeTo(self, transport): 
    815821        transport.write('SOME BYTES') 
    816822        return succeed(None) 
     
    827833        Create an L{HTTP11ClientProtocol} connected to a fake transport. 
    828834        """ 
    829835        self.transport = StringTransport() 
    830         self.protocol = HTTP11ClientProtocol() 
     836        self.clock = Clock() 
     837        self.protocol = HTTP11ClientProtocol(reactor = self.clock) 
    831838        self.protocol.makeConnection(self.transport) 
    832839 
    833840 
     
    879886        L{RequestGenerationFailed} wrapping the underlying failure. 
    880887        """ 
    881888        class BrokenRequest: 
     889            headers = Headers() 
    882890            def writeTo(self, transport): 
    883891                return fail(ArbitraryException()) 
    884892 
     
    901909        a L{Failure} of L{RequestGenerationFailed} wrapping that exception. 
    902910        """ 
    903911        class BrokenRequest: 
     912            headers = Headers() 
    904913            def writeTo(self, transport): 
    905914                raise ArbitraryException() 
    906915 
     
    13121321                                        [ConnectionAborted, _DataLoss]) 
    13131322        return deferred.addCallback(checkError) 
    13141323 
     1324    def _send100ContinueRequest(self, body): 
     1325        """ 
     1326        Send a L{Request} that expects 100-Continue with the given body. 
     1327        """ 
     1328        def _immediateScheduler(x): 
     1329            return succeed(x()) 
    13151330 
     1331        cooperator = Cooperator(scheduler = _immediateScheduler, started = False) 
     1332        producer = FileBodyProducer(StringIO(body), cooperator = cooperator) 
    13161333 
     1334        headers = Headers({'host': ['example.com'], 'expect': ['100-Continue']}) 
     1335 
     1336        d = self.protocol.request(Request('POST', '/foo', headers, producer)) 
     1337 
     1338        self.transport.clear() 
     1339 
     1340        cooperator.start() 
     1341 
     1342        self.assertEqual(self.transport.value(), '') 
     1343 
     1344        return d 
     1345 
     1346    def test_expect100ContinueGetFinalStatus(self): 
     1347        """ 
     1348        When we expect 100-Continue and get a final-status L{Response}, we don't 
     1349        send the L{Request} body and we return the first L{Response} to the user. 
     1350        """ 
     1351        d = self._send100ContinueRequest('x' * 10) 
     1352 
     1353        def cbResponse(response): 
     1354            self.assertEqual(response.code, 200) 
     1355 
     1356        d.addCallback(cbResponse) 
     1357 
     1358        self.protocol.dataReceived( 
     1359                "HTTP/1.1 200 OK\r\n" 
     1360                "Content-length: 0\r\n" 
     1361                "\r\n") 
     1362 
     1363        self.assertEqual(self.transport.value(), '') 
     1364 
     1365        return d 
     1366 
     1367    def test_expect100ContinueGet100Continue(self): 
     1368        """ 
     1369        When we expect 100-Continue and get a 100-Continue L{Response}, we send 
     1370        the L{Request} body and return the second L{Response} to the user. 
     1371        """ 
     1372        d = self._send100ContinueRequest('x' * 10) 
     1373 
     1374        def cbResponse(response): 
     1375            self.assertEqual(response.code, 200) 
     1376 
     1377        d.addCallback(cbResponse) 
     1378 
     1379        self.protocol.dataReceived( 
     1380                "HTTP/1.1 100 Continue\r\n" 
     1381                "Content-Length: 3\r\n" 
     1382                "\r\n" 
     1383                "123") 
     1384 
     1385        self.protocol.dataReceived( 
     1386                "HTTP/1.1 200 OK\r\n" 
     1387                "Content-Length: 0\r\n" 
     1388                "\r\n") 
     1389 
     1390        self.assertEqual(self.transport.value(), 'x' * 10) 
     1391 
     1392        return d 
     1393 
     1394    def test_expect100ContinueGet100ContinueBackToBack(self): 
     1395        """ 
     1396        When we expect 100-Continue and we get two responses back to back (100 and 
     1397        final status), we should act as if they arrived separately. 
     1398        """ 
     1399        d = self._send100ContinueRequest('x' * 10) 
     1400 
     1401        def cbResponse(response): 
     1402            self.assertEqual(response.code, 200) 
     1403 
     1404        d.addCallback(cbResponse) 
     1405 
     1406        self.protocol.dataReceived( 
     1407                "HTTP/1.1 100 Continue\r\n" 
     1408                "Content-Length: 3\r\n" 
     1409                "\r\n" 
     1410                "123" 
     1411                "HTTP/1.1 200 OK\r\n" 
     1412                "Content-Length: 0\r\n" 
     1413                "\r\n") 
     1414 
     1415        self.assertEqual(self.transport.value(), 'x' * 10) 
     1416 
     1417        return d 
     1418 
     1419    def test_expect100ContinueServerBroken(self): 
     1420        """ 
     1421        When we expect 100-Continue and the server is broken and waits for the 
     1422        L{Request} body, we wait for a limited time and then send the body anyway. 
     1423        """ 
     1424        d = self._send100ContinueRequest('x' * 10) 
     1425 
     1426        def cbResponse(response): 
     1427            self.assertEqual(response.code, 200) 
     1428 
     1429        d.addCallback(cbResponse) 
     1430 
     1431        self.clock.advance(10) 
     1432 
     1433        self.assertEqual(self.transport.value(), 'x' * 10) 
     1434 
     1435        self.protocol.dataReceived( 
     1436                "HTTP/1.1 200 OK\r\n" 
     1437                "Content-Length: 0\r\n" 
     1438                "\r\n") 
     1439 
     1440        return d 
     1441 
     1442    def test_expect100ContinueTimerFiresLate100ContinueResponse(self): 
     1443        """ 
     1444        When we expect 100-Continue and the server is slow and sends a 
     1445        100-Continue after we have already sent the body, we consume the 100-Continue 
     1446        L{Response} and return the second L{Response} to the user. 
     1447        """ 
     1448        d = self._send100ContinueRequest('x' * 10) 
     1449 
     1450        def cbResponse(response): 
     1451            self.assertEqual(response.code, 200) 
     1452 
     1453        d.addCallback(cbResponse) 
     1454 
     1455        self.clock.advance(10) 
     1456 
     1457        self.assertEqual(self.transport.value(), 'x' * 10) 
     1458 
     1459        self.protocol.dataReceived( 
     1460                "HTTP/1.1 100 Continue\r\n" 
     1461                "Content-length: 3\r\n" 
     1462                "\r\n" 
     1463                "123") 
     1464 
     1465        self.protocol.dataReceived( 
     1466                "HTTP/1.1 200 OK\r\n" 
     1467                "Content-length: 0\r\n" 
     1468                "\r\n") 
     1469 
     1470        return d 
     1471 
     1472    _garbageResponse = "unparseable garbage goes here\r\n" 
     1473 
     1474    def test_expect100ContinueBrokenFirstResponse(self): 
     1475        """ 
     1476        When we expect 100-Continue and the first L{Response} is broken, return 
     1477        the error to the user. 
     1478        """ 
     1479        d = self._send100ContinueRequest('x' * 10) 
     1480 
     1481        self.protocol.dataReceived(self._garbageResponse) 
     1482 
     1483        self.assertEqual(self.transport.value(), '') 
     1484 
     1485        return assertResponseFailed(self, d, [ParseError]) 
     1486 
     1487    def test_expect100ContinueBrokenFirstResponseChunkedBody(self): 
     1488        """ 
     1489        When we expect 100-Continue and the 100-Continue L{Response} has a 
     1490        chunked body and it is broken, return the error to the user. 
     1491        """ 
     1492        d = self._send100ContinueRequest('x' * 10) 
     1493 
     1494        self.protocol.dataReceived( 
     1495            "HTTP/1.1 100 Continue\r\n" 
     1496            "Transfer-Encoding: chunked\r\n" 
     1497            "\r\n") 
     1498 
     1499        self.protocol.dataReceived("3\r\nzzz\r\n") 
     1500        self.protocol.dataReceived("3\r\nzzz\r\nzzz\r\n") #incorrect chunk 
     1501 
     1502        self.assertEqual(self.transport.value(), '') 
     1503 
     1504        return assertResponseFailed(self, d, [ValueError, _DataLoss]) 
     1505 
     1506    def test_expect100ContinueBrokenSecondResponse(self): 
     1507        """ 
     1508        When we expect 100-Continue and the 100-Continue L{Response} is ok but 
     1509        the second L{Response} is broken, return the error to the user. 
     1510        """ 
     1511        d = self._send100ContinueRequest('x' * 10) 
     1512 
     1513        self.protocol.dataReceived( 
     1514                "HTTP/1.1 100 Continue\r\n" 
     1515                "Content-length: 3\r\n" 
     1516                "\r\n" 
     1517                "123") 
     1518 
     1519        self.protocol.dataReceived(self._garbageResponse) 
     1520 
     1521        self.assertEqual(self.transport.value(), 'x' * 10) 
     1522 
     1523        return assertResponseFailed(self, d, [ParseError]) 
     1524 
     1525 
    13171526class StringProducer: 
    13181527    """ 
    13191528    L{StringProducer} is a dummy body producer. 
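
For reference, a minimal sketch (not part of the patch) of how a caller might drive the new 100-Continue path, using only names exercised by the tests above; the host, path, body, and the helper name postWithExpectContinue are illustrative placeholders:

    from StringIO import StringIO

    from twisted.web.http_headers import Headers
    from twisted.web.client import FileBodyProducer
    from twisted.web._newclient import Request

    def postWithExpectContinue(protocol, body):
        # The Expect header is what makes HTTP11ClientProtocol.request()
        # take the 100-Continue path; without it the body is written
        # immediately after the headers.
        headers = Headers({'host': ['example.com'],
                           'expect': ['100-Continue']})
        producer = FileBodyProducer(StringIO(body))
        return protocol.request(Request('POST', '/foo', headers, producer))

The returned Deferred fires with the final-status Response, exactly as the tests above assert.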
  • twisted/web/_newclient.py

     
    565565        requestLines.append('\r\n') 
    566566        transport.writeSequence(requestLines) 
    567567 
    568  
    569     def _writeToChunked(self, transport): 
     568    def _writeBodyToChunked(self, transport): 
    570569        """ 
    571         Write this request to the given transport using chunked 
     570        Write this request's body to the given transport using chunked 
    572571        transfer-encoding to frame the body. 
    573572        """ 
    574         self._writeHeaders(transport, 'Transfer-Encoding: chunked\r\n') 
    575573        encoder = ChunkedEncoder(transport) 
    576574        encoder.registerProducer(self.bodyProducer, True) 
    577575        d = self.bodyProducer.startProducing(encoder) 
     
    590588        return d 
    591589 
    592590 
    593     def _writeToContentLength(self, transport): 
     591    def _writeBodyToContentLength(self, transport): 
    594592        """ 
    595         Write this request to the given transport using content-length to frame 
    596         the body. 
     593        Write this request's body to the given transport using content-length 
     594        to frame the body. 
    597595        """ 
    598         self._writeHeaders( 
    599             transport, 
    600             'Content-Length: %d\r\n' % (self.bodyProducer.length,)) 
    601596 
    602597        # This Deferred is used to signal an error in the data written to the 
    603598        # encoder below.  It can only errback and it will only do so before too 
     
    709704            been completely written to the transport or with a L{Failure} if 
    710705            there is any problem generating the request bytes. 
    711706        """ 
     707        self._writeHeadersTo(transport) 
     708        return self._writeBodyTo(transport) 
     709 
     710    def _writeHeadersTo(self, transport): 
     711        """ 
     712        Format this L{Request}'s headers as HTTP/1.1 and write them 
     713        synchronously to the given transport. 
     714        """ 
     715        TEorCL = None 
    712716        if self.bodyProducer is not None: 
     717           if self.bodyProducer.length is UNKNOWN_LENGTH: 
     718               TEorCL = "Transfer-Encoding: chunked\r\n" 
     719           else: 
     720               TEorCL = 'Content-Length: %d\r\n' % (self.bodyProducer.length,) 
     721 
     722        self._writeHeaders(transport, TEorCL) 
     723 
     724    def _writeBodyTo(self, transport): 
     725        """ 
     726        Write this L{Request}'s body to the given transport, framing it 
     727        according to the headers written earlier ('Transfer-Encoding' or 
     728        'Content-Length'). 
     729 
     730        @return: A L{Deferred} which fires with C{None} when the request has 
     731            been completely written to the transport or with a L{Failure} if there 
     732            is any problem generating the request body bytes. If C{bodyProducer} 
     733            is C{None} the returned L{Deferred} has already fired. 
     734        """ 
     735        if self.bodyProducer is not None: 
    713736            if self.bodyProducer.length is UNKNOWN_LENGTH: 
    714                 return self._writeToChunked(transport) 
     737                return self._writeBodyToChunked(transport) 
    715738            else: 
    716                 return self._writeToContentLength(transport) 
     739                return self._writeBodyToContentLength(transport) 
    717740        else: 
    718             self._writeHeaders(transport, None) 
    719741            return succeed(None) 
    720742 
    721743 
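
As a reading aid (not part of the patch), the framing header chosen by _writeHeadersTo above boils down to this decision; _chooseFraming is a hypothetical name used only for illustration:

    def _chooseFraming(bodyProducer):
        # Mirrors the TEorCL selection in _writeHeadersTo; UNKNOWN_LENGTH is
        # the sentinel already imported by _newclient.
        if bodyProducer is None:
            return None
        if bodyProducer.length is UNKNOWN_LENGTH:
            return "Transfer-Encoding: chunked\r\n"
        return 'Content-Length: %d\r\n' % (bodyProducer.length,)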
     
    11581180            self._producer.pauseProducing() 
    11591181 
    11601182 
     1183class DiscardWithDeferred(Protocol): 
     1184    """ 
     1185    A L{Protocol} that discards all received data and that fires a L{Deferred} 
     1186    when all data has been received. 
    11611187 
     1188    @ivar finishedDeferred: L{Deferred} which fires with C{None} when all data 
     1189        has been received and with L{Failure} on error. 
     1190 
     1191    """ 
     1192 
     1193    def __init__(self): 
     1194        self.finishedDeferred = Deferred() 
     1195 
     1196    def dataReceived(self, data): 
     1197        pass 
     1198 
     1199    def connectionLost(self, reason): 
     1200        if reason.type == ResponseDone: 
     1201            self.finishedDeferred.callback(None) 
     1202        else: 
     1203            self.finishedDeferred.errback(reason) 
     1204 
     1205 
     1206TIMEOUT_100_CONTINUE = 1 
     1207 
     1208 
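
A small usage sketch (not part of the patch) of DiscardWithDeferred, mirroring what _discardResponseBody does further down: drain a response body we do not care about and get a Deferred that fires once it has been fully received. drainBody is a hypothetical helper name:

    def drainBody(response):
        # response is an IResponse; deliverBody feeds its body to the
        # discarding protocol, and finishedDeferred fires on ResponseDone
        # (or errbacks on any other connectionLost reason).
        discarder = DiscardWithDeferred()
        response.deliverBody(discarder)
        return discarder.finishedDeferred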
    11621209class HTTP11ClientProtocol(Protocol): 
    11631210    """ 
    11641211    L{HTTP11ClientProtocol} is an implementation of the HTTP 1.1 client 
    1165     protocol.  It supports as few features as possible. 
     1212    protocol. It supports as few features as possible. 
    11661213 
    11671214    @ivar _parser: After a request is issued, the L{HTTPClientParser} to 
    11681215        which received data making up the response to that request is 
    11691216        delivered. 
    11701217 
     1218    @ivar _reactor: The reactor used for any C{callLater} calls. 
     1219 
    11711220    @ivar _finishedRequest: After a request is issued, the L{Deferred} which 
    11721221        will fire when a L{Response} object corresponding to that request is 
    11731222        available.  This allows L{HTTP11ClientProtocol} to fail the request 
     
    11881237        received.  This is eventually chained with C{_finishedRequest}, but 
    11891238        only in certain cases to avoid double firing that Deferred. 
    11901239 
     1240    @ivar _responseBodyDeferred: After a request is issued, the L{Deferred} 
     1241        that fires, once the C{_parser} has finished parsing the entire L{Response} 
     1242        including its body, with any data that arrived after that L{Response}. 
     1243        This can be used to set up another C{_parser}, but usually that decision 
     1244        is made in C{_responseDeferred}'s callback. 
     1245 
     1246    @ivar _forcedRequestBody: C{True} if we had a 100-Continue L{Request} whose 
     1247        body was forcibly written to the transport because the server 
     1248        did not respond in time with a L{Response} (possibly because of a 
     1249        buggy server that doesn't implement expectations correctly). 
     1250 
     1251    @ivar _firstResponseTimer: An L{IDelayedCall} that fires after 
     1252        C{TIMEOUT_100_CONTINUE} seconds and forcibly sends the L{Request} body 
     1253        to the server. 
     1254 
     1255    @ivar _firstResponseDeferred: A L{Deferred} that fires with the first 
     1256        L{Response} to a 100-Continue L{Request}. This may be a 100-Continue 
     1257        response or a response with a final status. It may fire in the 
     1258        WAITING_100_CONTINUE_RESPONSE, TRANSMITTING or WAITING states. 
     1259 
     1260    @ivar _firstResponseBodyDeferred: A L{Deferred} that fires when the body 
     1261        of the first L{Response} to a 100-Continue L{Request} has been 
     1262        successfully parsed. It signals that we can start sending the 
     1263        L{Request} body. It fires in the WAITING_100_CONTINUE_RESPONSE_BODY, 
     1264        TRANSMITTING and WAITING states. 
     1265 
    11911266    @ivar _state: Indicates what state this L{HTTP11ClientProtocol} instance 
    11921267        is in with respect to transmission of a request and reception of a 
    11931268        response.  This may be one of the following strings: 
     
    12081283          - GENERATION_FAILED: There was an error while generating the request.  The 
    12091284            request was not fully sent to the network. 
    12101285 
     1286          - WAITING_100_CONTINUE_RESPONSE: We're waiting for a L{Response} to a 
     1287            L{Request} that expects 100-Continue. 
     1288 
     1289          - WAITING_100_CONTINUE_RESPONSE_BODY: Got a 100-Continue 
     1290            L{Response} and we're discarding its body before sending the 
     1291            L{Request} body. 
     1292 
    12111293          - WAITING: The request was fully sent to the network.  The 
    12121294            instance is now waiting for the response to be fully received. 
    12131295 
     
    12151297            be aborted. 
    12161298 
    12171299          - CONNECTION_LOST: The connection has been lost. 
     1300    """ 
    12181301 
    1219     """ 
    12201302    _state = 'QUIESCENT' 
    12211303    _parser = None 
    12221304 
     1305 
     1306    def __init__(self, reactor = None): 
     1307        """ 
     1308        Initialize this L{HTTP11ClientProtocol}. A reactor may optionally be 
     1309        given; otherwise the global reactor is used. 
     1310        """ 
     1311        if reactor is None: 
     1312            from twisted.internet import reactor 
     1313        self._reactor = reactor 
     1314 
     1315 
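
The optional reactor argument above exists mainly so tests can substitute a deterministic clock. A minimal sketch (mirroring the test setUp earlier, not part of the patch) of forcing the 100-Continue timeout without real delays, assuming this patch is applied:

    from twisted.internet.task import Clock
    from twisted.test.proto_helpers import StringTransport
    from twisted.web._newclient import HTTP11ClientProtocol, TIMEOUT_100_CONTINUE

    clock = Clock()
    protocol = HTTP11ClientProtocol(reactor=clock)
    protocol.makeConnection(StringTransport())
    # ... issue a 100-Continue-expecting request, then:
    clock.advance(TIMEOUT_100_CONTINUE)  # fires _firstResponseTimer, which
                                         # calls _forceRequestBody()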
    12231316    def request(self, request): 
    12241317        """ 
    12251318        Issue C{request} over C{self.transport} and return a L{Deferred} which 
     
    12391332            may errback with L{RequestNotSent} if it is not possible to send 
    12401333            any more requests using this L{HTTP11ClientProtocol}. 
    12411334        """ 
     1335 
    12421336        if self._state != 'QUIESCENT': 
    12431337            return fail(RequestNotSent()) 
    12441338 
    1245         self._state = 'TRANSMITTING' 
    1246         _requestDeferred = maybeDeferred(request.writeTo, self.transport) 
     1339        if request.headers.hasHeader('expect'): 
     1340            _expectations = request.headers.getRawHeaders('expect') 
     1341            _expects100Continue = '100-continue' in [x.lower() for x in 
     1342                    _expectations] 
     1343        else: 
     1344            _expects100Continue = False 
     1345 
    12471346        self._finishedRequest = Deferred() 
    12481347 
    1249         # Keep track of the Request object in case we need to call stopWriting 
    1250         # on it. 
    12511348        self._currentRequest = request 
    12521349 
    1253         self._transportProxy = TransportProxyProducer(self.transport) 
    1254         self._parser = HTTPClientParser(request, self._finishResponse) 
    1255         self._parser.makeConnection(self._transportProxy) 
    1256         self._responseDeferred = self._parser._responseDeferred 
     1350        if _expects100Continue: 
     1351            self._handle100ContinueRequest(request) 
     1352        else: 
     1353            self._handleRequest(request) 
    12571354 
    12581355        def cbRequestWrotten(ignored): 
    1259             if self._state == 'TRANSMITTING': 
     1356            if self._state in ['TRANSMITTING', 
     1357                    'WAITING_100_CONTINUE_RESPONSE', 
     1358                    'WAITING_100_CONTINUE_RESPONSE_BODY']: 
    12601359                self._state = 'WAITING' 
    12611360                # XXX We're stuck in WAITING until we lose the connection now. 
    12621361                # This will be wrong when persistent connections are supported. 
     
    12741373                log.err(err, 'Error writing request, but not in valid state ' 
    12751374                             'to finalize request: %s' % self._state) 
    12761375 
    1277         _requestDeferred.addCallbacks(cbRequestWrotten, ebRequestWriting) 
     1376        self._requestDeferred.addCallbacks(cbRequestWrotten, ebRequestWriting) 
    12781377 
     1378        def cbResponseBody(ignored): 
     1379            if self._state == 'TRANSMITTING': 
     1380                # The server sent the entire response before we could send the 
     1381                # whole request.  That sucks.  Oh well.  Fire the request() 
     1382                # Deferred with the response.  But first, make sure that if the 
     1383                # request does ever finish being written that it won't try to 
     1384                # fire that Deferred. 
     1385                self._state = 'TRANSMITTING_AFTER_RECEIVING_RESPONSE' 
     1386                self._responseDeferred.chainDeferred(self._finishedRequest) 
     1387 
     1388            self._giveUp(Failure(ConnectionDone("synthetic!"))) 
     1389 
     1390 
     1391        self._responseBodyDeferred.addCallback(cbResponseBody) 
     1392 
    12791393        return self._finishedRequest 
    12801394 
    12811395 
    1282     def _finishResponse(self, rest): 
     1396    def _handleRequest(self, request): 
    12831397        """ 
    1284         Called by an L{HTTPClientParser} to indicate that it has parsed a 
    1285         complete response. 
     1398        Send a non-100-Continue-expecting L{Request} to the transport. 
    12861399 
    1287         @param rest: A C{str} giving any trailing bytes which were given to 
    1288             the L{HTTPClientParser} which were not part of the response it 
    1289             was parsing. 
     1400        @param request: The L{Request} to be sent. 
     1401 
    12901402        """ 
    1291         # XXX this is because Connection: close is hard-coded above, probably 
    1292         # will want to change that at some point.  Either the client or the 
    1293         # server can control this. 
     1403        self._state = 'TRANSMITTING' 
    12941404 
    1295         # XXX If the connection isn't being closed at this point, it's 
    1296         # important to make sure the transport isn't paused (after _giveUp, 
    1297         # or inside it, or something - after the parser can no longer touch 
    1298         # the transport) 
     1405        self._requestDeferred = maybeDeferred(request.writeTo, self.transport) 
    12991406 
    1300         # For both of the above, see #3420 for persistent connections. 
     1407        (self._responseDeferred, 
     1408                self._responseBodyDeferred) = self._setupParser(request) 
    13011409 
    1302         if self._state == 'TRANSMITTING': 
    1303             # The server sent the entire response before we could send the 
    1304             # whole request.  That sucks.  Oh well.  Fire the request() 
    1305             # Deferred with the response.  But first, make sure that if the 
    1306             # request does ever finish being written that it won't try to fire 
    1307             # that Deferred. 
    1308             self._state = 'TRANSMITTING_AFTER_RECEIVING_RESPONSE' 
    1309             self._responseDeferred.chainDeferred(self._finishedRequest) 
    13101410 
    1311         self._giveUp(Failure(ConnectionDone("synthetic!"))) 
     1411    def _handle100ContinueRequest(self, request): 
     1412        """ 
     1413        Send a 100-Continue-expecting L{Request}: headers now, body deferred. 
    13121414 
     1415        @param request: The L{Request} to be sent. 
     1416        """ 
     1417        # This is synchronous. 
     1418        request._writeHeadersTo(self.transport) 
    13131419 
     1420        self._state = 'WAITING_100_CONTINUE_RESPONSE' 
     1421 
     1422        self._forcedRequestBody = False  # not yet, anyway 
     1423 
     1424        self._requestDeferred = Deferred() 
     1425 
     1426        self._responseDeferred = Deferred() 
     1427 
     1428        self._responseBodyDeferred = Deferred() 
     1429 
     1430        (self._firstResponseDeferred, 
     1431                self._firstResponseBodyDeferred) = self._setupParser(request) 
     1432 
     1433        self._firstResponseTimer = self._reactor.callLater( 
     1434                TIMEOUT_100_CONTINUE, 
     1435                self._forceRequestBody) 
     1436 
     1437        self._firstResponseDeferred.addCallbacks( 
     1438                self._handleFirstResponse, self._handle100ContinueError) 
     1439 
     1440 
     1441    def _handleFirstResponse(self, response): 
     1442        """ 
     1443        Handle the first L{Response} to a 100-Continue L{Request}. This may 
     1444        be a 100-Continue or a final-status L{Response}. If it is a 100 
     1445        response, consume its body and then send the L{Request} body; otherwise 
     1446        forward the response to the user (eventually via self._finishedRequest). 
     1447 
     1448        @param response: The L{Response} to the current L{Request}. 
     1449        """ 
     1450        # This may be inactive if this is a response that came after the timer 
     1451        # fired. 
     1452        if self._firstResponseTimer.active(): 
     1453            self._firstResponseTimer.cancel() 
     1454 
     1455        if self._state == 'WAITING_100_CONTINUE_RESPONSE': 
     1456 
     1457            if response.code == 100: 
     1458 
     1459                self._state = 'WAITING_100_CONTINUE_RESPONSE_BODY' 
     1460 
     1461                self._discardResponseBody(response, 
     1462                        self._handleFirstResponseBody, 
     1463                        self._handle100ContinueError) 
     1464            else: 
     1465                # We're done with this request. 
     1466 
     1467                self._requestDeferred.callback(None) 
     1468 
     1469                self._firstResponseBodyDeferred.chainDeferred( 
     1470                        self._responseBodyDeferred) 
     1471 
     1472                self._responseDeferred.callback(response) 
     1473 
     1474        else: 
     1475 
     1476            if self._forcedRequestBody and response.code == 100: 
     1477                # Late arrival, eat it. 
     1478                self._discardResponseBody(response, 
     1479                        self._handleFirstResponseBody, 
     1480                        self._handle100ContinueError) 
     1481 
     1482            else: 
     1483                # A late response that isn't 100-Continue; could be from a 
     1484                # server that doesn't implement expectations correctly. 
     1485                self._forcedRequestBody = False 
     1486 
     1487                self._firstResponseBodyDeferred.chainDeferred( 
     1488                        self._responseBodyDeferred) 
     1489 
     1490                self._responseDeferred.callback(response) 
     1491 
     1492 
     1493    def _handleFirstResponseBody(self, rest): 
     1494        """ 
     1495        The body of the first L{Response} to the current 100-Continue 
     1496        L{Request} has been parsed. If the L{Response} wasn't a 100-Continue, 
     1497        forward C{rest} to self._responseBodyDeferred; otherwise create a new 
     1498        parser for the second L{Response}. 
     1499 
     1500        @param rest: Data that wasn't parsed by the parser because it came 
     1501            after the L{Response}. If we reload the parser, initialize it 
     1502            with this data. 
     1503        """ 
     1504        if self._forcedRequestBody or self._state == 'WAITING_100_CONTINUE_RESPONSE_BODY': 
     1505            # We've just finished discarding a 100-Continue response's body. 
     1506            # Either we're waiting to send the request body, or we've just 
     1507            # ignored a late response. 
     1508 
     1509            self._forcedRequestBody = False 
     1510 
     1511            if self._state == 'WAITING_100_CONTINUE_RESPONSE_BODY': 
     1512 
     1513                self._state = 'TRANSMITTING' 
     1514 
     1515                # Send the request body. 
     1516 
     1517                _requestBodyDeferred = maybeDeferred( 
     1518                        self._currentRequest._writeBodyTo, 
     1519                        self.transport) 
     1520 
     1521                _requestBodyDeferred.chainDeferred(self._requestDeferred) 
     1522 
     1523            # In both cases create a new parser. 
     1524 
     1525            self._disconnectParser(None) 
     1526 
     1527            (_secondResponseDeferred, 
     1528                    _secondResponseBodyDeferred) = self._setupParser( 
     1529                            self._currentRequest, data = rest) 
     1530 
     1531            _secondResponseDeferred.chainDeferred(self._responseDeferred) 
     1532 
     1533            _secondResponseBodyDeferred.chainDeferred(self._responseBodyDeferred) 
     1534 
     1535        else: 
     1536            self._responseBodyDeferred.callback(rest) 
     1537 
     1538 
     1539    def _discardResponseBody(self, response, callback, errback): 
     1540        """ 
     1541        Discard a L{Response}'s body, calling C{callback} when done and 
     1542        C{errback} on error. 
     1543 
     1544        @param response: L{Response} that needs to be discarded. 
     1545        @param callback: function to be called when done. 
     1546        @param errback: function to be called on error. 
     1547        """ 
     1548        discarder = DiscardWithDeferred() 
     1549 
     1550        # We use discarder.finishedDeferred to catch body parsing 
     1551        # errors and self._firstResponseBodyDeferred to catch success. 
     1552 
     1553        discarder.finishedDeferred.addErrback(errback) 
     1554 
     1555        response.deliverBody(discarder) 
     1556 
     1557        self._firstResponseBodyDeferred.addCallback(callback) 
     1558 
     1559 
     1560    def _forceRequestBody(self): 
     1561        """ 
     1562        Send the current L{Request} body even though we haven't yet received a 
     1563        100 or final-status L{Response}. The server may just be broken and not 
     1564        implement expectations correctly. 
     1565        """ 
     1566        self._state = 'TRANSMITTING' 
     1567 
     1568        self._forcedRequestBody = True 
     1569 
     1570        _requestBodyDeferred = maybeDeferred(self._currentRequest._writeBodyTo, 
     1571                self.transport) 
     1572 
     1573        _requestBodyDeferred.chainDeferred(self._requestDeferred) 
     1574 
     1575 
     1576    def _setupParser(self, request, data = ''): 
     1577        """ 
     1578        Set up an L{HTTPClientParser} for a L{Response} to a L{Request}. If this 
     1579        is not the first parser associated with this protocol, call 
     1580        L{HTTP11ClientProtocol._disconnectParser} first. Pass the given C{data} 
     1581        to the newly created parser. 
     1582 
     1583        @param request: L{Request} waiting for a L{Response}. 
     1584        @param data: Data to initialize the L{HTTPClientParser} with. 
     1585        """ 
     1586        self._transportProxy = TransportProxyProducer(self.transport) 
     1587 
     1588        _responseBodyDeferred = Deferred() 
     1589 
     1590        def cbOnBodyFinish(rest): 
     1591            _responseBodyDeferred.callback(rest) 
     1592 
     1593        self._parser = HTTPClientParser(request, cbOnBodyFinish) 
     1594 
     1595        self._parser.makeConnection(self._transportProxy) 
     1596 
     1597        # Grab this before passing data, since it might disappear if data is a 
     1598        # complete response. 
     1599 
     1600        _responseDeferred = self._parser._responseDeferred 
     1601 
     1602        self._parser.dataReceived(data) 
     1603 
     1604        return (_responseDeferred, _responseBodyDeferred) 
     1605 
     1606 
     1607    def _cleanupOn100ContinueError(self): 
     1608        """ 
     1609        State-dependent cleanup on parsing errors while handling a 
     1610        100-Continue-expecting L{Request}. 
     1611        """ 
     1612    _cleanupOn100ContinueError = makeStatefulDispatcher( 
     1613            "cleanupOn100ContinueError", _cleanupOn100ContinueError) 
     1614 
     1615 
     1616    def _cleanupOn100ContinueError_WAITING_100_CONTINUE_RESPONSE(self): 
     1617        """ 
     1618        The L{Request} body has not been sent yet. Fire self._requestDeferred 
     1619        because we're effectively done dealing with this L{Request}; this also 
     1620        lets the L{Failure} be forwarded to the user. 
     1621        """ 
     1622        self._requestDeferred.callback(None) 
     1623 
     1624 
     1625    def _cleanupOn100ContinueError_WAITING_100_CONTINUE_RESPONSE_BODY(self): 
     1626        """ 
     1627        The L{Request} body has not been sent yet. Fire self._requestDeferred 
     1628        because we're effectively done dealing with this L{Request}; this also 
     1629        lets the L{Failure} be forwarded to the user. 
     1630        """ 
     1631        self._requestDeferred.callback(None) 
     1632 
     1633 
     1634    def _cleanupOn100ContinueError_TRANSMITTING(self): 
     1635        """ 
     1636        We're currently sending the L{Request} body. The error will be sent to 
     1637        the user after the body has been sent. No cleanup needed. 
     1638        """ 
     1639 
     1640 
     1641    def _cleanupOn100ContinueError_WAITING(self): 
     1642        """ 
     1643        No cleanup needed. 
     1644        """ 
     1645 
     1646 
     1647    def _handle100ContinueError(self, err): 
     1648        """ 
     1649        Handle any L{Failure} that could occur while handling a L{Request} that 
     1650        expects 100-Continue. These are errors in parsing the first response 
     1651        or its body, and they can occur in the 
     1652        WAITING_100_CONTINUE_RESPONSE/WAITING_100_CONTINUE_RESPONSE_BODY states 
     1653        if the server supports expectations, or in TRANSMITTING/WAITING if the 
     1654        L{Request} body was sent after TIMEOUT_100_CONTINUE. Depending on the 
     1655        current state some cleanup is performed and then the L{Failure} 
     1656        is forwarded to self._responseDeferred. 
     1657 
     1658        @param err: L{Failure} to be forwarded. 
     1659        """ 
     1660        self._cleanupOn100ContinueError() 
     1661        self._responseDeferred.errback(err) 
     1662 
     1663 
    13141664    def _disconnectParser(self, reason): 
    13151665        """ 
    13161666        If there is still a parser, call its C{connectionLost} method with the 
     
    14191769        self._state = 'CONNECTION_LOST' 
    14201770 
    14211771 
     1772    def _connectionLost_WAITING_100_CONTINUE_RESPONSE(self, reason): 
     1773        """ 
     1774        Disconnect the parser so that it can propagate the event and move to 
     1775        the C{'CONNECTION_LOST'} state. 
     1776        """ 
     1777        self._disconnectParser(reason) 
     1778        self._state = 'CONNECTION_LOST' 
     1779 
     1780 
     1781    def _connectionLost_WAITING_100_CONTINUE_RESPONSE_BODY(self, reason): 
     1782        """ 
     1783        Disconnect the parser so that it can propagate the event and move to 
     1784        the C{'CONNECTION_LOST'} state. 
     1785        """ 
     1786        self._disconnectParser(reason) 
     1787        self._state = 'CONNECTION_LOST' 
     1788 
     1789 
    14221790    def abort(self): 
    14231791        """ 
    14241792        Close the connection and cause all outstanding L{request} L{Deferred}s