2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #include "hphp/runtime/server/proxygen/proxygen-server.h"
22 #include "hphp/runtime/server/fake-transport.h"
23 #include "hphp/runtime/server/http-server.h"
24 #include "hphp/runtime/server/proxygen/proxygen-transport.h"
25 #include "hphp/runtime/server/server-name-indication.h"
26 #include "hphp/runtime/server/server-stats.h"
27 #include "hphp/runtime/base/crash-reporter.h"
28 #include "hphp/runtime/base/runtime-option.h"
29 #include "hphp/runtime/base/program-functions.h"
30 #include "hphp/runtime/base/url.h"
31 #include "hphp/runtime/debugger/debugger.h"
32 #include "hphp/util/alloc.h"
33 #include "hphp/util/compatibility.h"
34 #include "hphp/util/process.h"
36 #include <folly/portability/Unistd.h>
37 #include <proxygen/lib/http/codec/HTTP2Constants.h>
41 using apache::thrift::transport::TTransportException
;
42 using folly::SocketAddress
;
43 using folly::AsyncServerSocket
;
44 using wangle::Acceptor
;
// HPHPSessionAcceptor: bridges proxygen's accept path into the HPHP server.
// NOTE(review): this capture is incomplete — the initialization of m_server
// and the constructor body are absent (embedded source numbering jumps from
// 49 to 53).  Only comments are added here; code is untouched.
46 HPHPSessionAcceptor::HPHPSessionAcceptor(
47 const proxygen::AcceptorConfiguration
& config
,
48 ProxygenServer
*server
)
49 : HTTPSessionAcceptor(config
),
53 bool HPHPSessionAcceptor::canAccept(const SocketAddress
& /*address*/) {
54 // for now, we don't bother with the address whitelist
55 return m_server
->canAccept();
58 void HPHPSessionAcceptor::onIngressError(
59 const proxygen::HTTPSession
& /*session*/, proxygen::ProxygenError error
) {
60 // This method is invoked when the HTTP library encountered some error before
61 // it could completely parse the headers. Most of these are HTTP garbage
62 // (400 Bad Request) or client timeouts (408).
63 FakeTransport
transport((error
== proxygen::kErrorTimeout
) ? 408 : 400);
64 transport
.m_url
= folly::to
<std::string
>("/onIngressError?error=",
65 proxygen::getErrorString(error
));
66 m_server
->onRequestError(&transport
);
69 proxygen::HTTPTransaction::Handler
*
70 HPHPSessionAcceptor::newHandler(proxygen::HTTPTransaction
& /*txn*/,
71 proxygen::HTTPMessage
* /*msg*/) noexcept
{
72 auto transport
= std::make_shared
<ProxygenTransport
>(m_server
);
73 transport
->setTransactionReference(transport
);
74 return transport
.get();
77 void HPHPSessionAcceptor::onConnectionsDrained() {
78 m_server
->onConnectionsDrained();
// ProxygenJob: a dispatcher job wrapping a single ProxygenTransport.
// NOTE(review): capture gap — the initializer on original line 82
// (presumably `transport(t),`) is absent from this view; verify against
// the upstream source before relying on this constructor.
81 ProxygenJob::ProxygenJob(std::shared_ptr
<ProxygenTransport
> t
) :
83 reqStart(t
->getRequestStart()) {
86 void ProxygenJob::getRequestStart(struct timespec
*outReqStart
) {
87 *outReqStart
= reqStart
;
90 ///////////////////////////////////////////////////////////////////////////////
91 // ProxygenTransportTraits
93 ProxygenTransportTraits::ProxygenTransportTraits(
94 std::shared_ptr
<ProxygenJob
> job
, void* opaque
, int /*id*/)
95 : server_((ProxygenServer
*)opaque
), transport_(std::move(job
->transport
)) {
96 VLOG(4) << "executing request with path=" << transport_
->getUrl();
99 ProxygenTransportTraits::~ProxygenTransportTraits() {
100 // ProxygenTransport must be released in worker thread
101 ProxygenTransport
*transport
= transport_
.get();
102 transport
->finish(std::move(transport_
));
// Per-thread initialization for the proxygen I/O worker thread; delegates
// to the base class.  NOTE(review): original lines 107-109 are absent from
// this capture (likely HPHP-specific thread init) — confirm upstream.
105 void HPHPWorkerThread::setup() {
106 WorkerThread::setup();
// Per-thread teardown.  NOTE(review): original line 111 is absent from
// this capture (likely HPHP-specific cleanup before the base-class call).
110 void HPHPWorkerThread::cleanup() {
112 WorkerThread::cleanup();
115 ///////////////////////////////////////////////////////////////////////////////
116 ProxygenServer::ProxygenServer(
117 const ServerOptions
& options
118 ) : Server(options
.m_address
, options
.m_port
),
119 m_accept_sock(options
.m_serverFD
),
120 m_accept_sock_ssl(options
.m_sslFD
),
121 m_worker(&m_eventBaseManager
),
122 m_dispatcher(options
.m_maxThreads
,
123 RuntimeOption::ServerThreadDropCacheTimeoutSeconds
,
124 RuntimeOption::ServerThreadDropStack
,
125 this, RuntimeOption::ServerThreadJobLIFOSwitchThreshold
,
126 RuntimeOption::ServerThreadJobMaxQueuingMilliSeconds
,
127 kNumPriorities
, RuntimeOption::QueuedJobsReleaseRate
,
128 0, options
.m_initThreads
, options
.m_queueToWorkerRatio
) {
129 SocketAddress address
;
130 if (options
.m_address
.empty()) {
131 address
.setFromLocalPort(options
.m_port
);
133 address
.setFromHostPort(options
.m_address
, options
.m_port
);
135 m_httpConfig
.bindAddress
= address
;
136 m_httpConfig
.acceptBacklog
= RuntimeOption::ServerBacklog
;
137 m_httpsConfig
.acceptBacklog
= RuntimeOption::ServerBacklog
;
138 // TODO: proxygen only supports downstream keep-alive
139 std::chrono::seconds timeout
;
140 if (RuntimeOption::ConnectionTimeoutSeconds
> 0) {
141 timeout
= std::chrono::seconds(RuntimeOption::ConnectionTimeoutSeconds
);
143 // default to 50s (to match libevent)
144 timeout
= std::chrono::seconds(50);
146 m_httpConfig
.connectionIdleTimeout
= timeout
;
147 m_httpConfig
.transactionIdleTimeout
= timeout
;
148 m_httpsConfig
.connectionIdleTimeout
= timeout
;
149 m_httpsConfig
.transactionIdleTimeout
= timeout
;
151 if (RuntimeOption::ServerEnableH2C
) {
152 m_httpConfig
.allowedPlaintextUpgradeProtocols
= {
153 proxygen::http2::kProtocolCleartextString
};
154 // Set flow control (for uploads) to 1MB. We could also make this
155 // configurable if needed
156 m_httpConfig
.initialReceiveWindow
= 1 << 20;
157 m_httpConfig
.receiveSessionWindowSize
= 1 << 20;
160 if (!options
.m_takeoverFilename
.empty()) {
161 m_takeover_agent
.reset(new TakeoverAgent(options
.m_takeoverFilename
));
163 const std::vector
<std::chrono::seconds
> levels
{
164 std::chrono::seconds(10), std::chrono::seconds(120)};
165 ProxygenTransport::s_requestErrorCount
=
166 ServiceData::createTimeSeries("http_response_error",
167 {ServiceData::StatsType::COUNT
},
169 ProxygenTransport::s_requestNonErrorCount
=
170 ServiceData::createTimeSeries("http_response_nonerror",
171 {ServiceData::StatsType::COUNT
},
// Destructor: logs entry/exit around teardown.  NOTE(review): original
// line 177 (the actual teardown between the two log lines) is absent from
// this capture.
175 ProxygenServer::~ProxygenServer() {
176 Logger::Verbose("%p: destroying ProxygenServer", this);
178 Logger::Verbose("%p: ProxygenServer destroyed", this);
// Callback from the TakeoverAgent.  LISTEN_SOCKET: another process has
// taken over the listen socket, so mark this server STOPPING (it keeps
// serving already-accepted connections).  TERMINATE: hard-stop listening
// immediately.  NOTE(review): original lines 185 and 191-193 (comment
// tail, closing braces and the return value) are absent from this capture.
181 int ProxygenServer::onTakeoverRequest(TakeoverAgent::RequestType type
) {
182 if (type
== TakeoverAgent::RequestType::LISTEN_SOCKET
) {
183 // Subsequent calls to ProxygenServer::stop() won't do anything.
184 // The server continues accepting until RequestType::TERMINATE is
186 setStatus(RunStatus::STOPPING
);
187 } else if (type
== TakeoverAgent::RequestType::TERMINATE
) {
188 stopListening(true /*hard*/);
189 // No need to do m_takeover_agent->stop(), as the afdt server is
190 // going to be closed when this returns.
195 void ProxygenServer::takeoverAborted() {
196 m_httpServerSocket
.reset();
199 void ProxygenServer::addTakeoverListener(TakeoverListener
* listener
) {
200 if (m_takeover_agent
) {
201 m_takeover_agent
->addTakeoverListener(listener
);
205 void ProxygenServer::removeTakeoverListener(TakeoverListener
* listener
) {
206 if (m_takeover_agent
) {
207 m_takeover_agent
->removeTakeoverListener(listener
);
211 void ProxygenServer::start() {
212 m_httpServerSocket
.reset(new AsyncServerSocket(m_worker
.getEventBase()));
213 bool needListen
= true;
214 auto failedToListen
= [](const std::exception
& ex
,
215 const folly::SocketAddress
& addr
) {
216 Logger::Error("failed to listen: %s", ex
.what());
217 throw FailedToListenException(addr
.getAddressStr(), addr
.getPort());
221 if (m_accept_sock
>= 0) {
222 Logger::Info("inheritfd: using inherited fd %d for server",
224 m_httpServerSocket
->useExistingSocket(m_accept_sock
);
226 // make it possible to quickly reuse the port
227 m_httpServerSocket
->setReusePortEnabled(RuntimeOption::StopOldServer
);
228 m_httpServerSocket
->bind(m_httpConfig
.bindAddress
);
230 } catch (const std::system_error
& ex
) {
231 bool takoverSucceeded
= false;
232 if (ex
.code().value() == EADDRINUSE
&&
234 m_accept_sock
= m_takeover_agent
->takeover();
235 if (m_accept_sock
>= 0) {
236 Logger::Info("takeover: using takeover fd %d for server",
238 m_httpServerSocket
->useExistingSocket(m_accept_sock
);
240 m_takeover_agent
->requestShutdown();
241 takoverSucceeded
= true;
244 if (!takoverSucceeded
) {
245 failedToListen(ex
, m_httpConfig
.bindAddress
);
248 if (m_takeover_agent
) {
249 m_takeover_agent
->setupFdServer(m_worker
.getEventBase()->getLibeventBase(),
250 m_httpServerSocket
->getSocket(), this);
253 m_httpAcceptor
.reset(new HPHPSessionAcceptor(m_httpConfig
, this));
254 m_httpAcceptor
->init(m_httpServerSocket
.get(), m_worker
.getEventBase());
256 if (m_httpConfig
.isSSL() || m_httpsConfig
.isSSL()) {
257 if (!RuntimeOption::SSLCertificateDir
.empty()) {
258 ServerNameIndication::load(RuntimeOption::SSLCertificateDir
,
259 std::bind(&ProxygenServer::initialCertHandler
,
261 std::placeholders::_1
,
262 std::placeholders::_2
,
263 std::placeholders::_3
,
264 std::placeholders::_4
));
267 if (m_httpsConfig
.isSSL()) {
268 m_httpsServerSocket
.reset(new AsyncServerSocket(m_worker
.getEventBase()));
270 if (m_accept_sock_ssl
>= 0) {
271 Logger::Info("inheritfd: using inherited fd %d for ssl",
273 m_httpsServerSocket
->useExistingSocket(m_accept_sock_ssl
);
275 m_httpsServerSocket
->setReusePortEnabled(RuntimeOption::StopOldServer
);
276 m_httpsServerSocket
->bind(m_httpsConfig
.bindAddress
);
278 } catch (const TTransportException
& ex
) {
279 failedToListen(ex
, m_httpsConfig
.bindAddress
);
282 m_httpsAcceptor
.reset(new HPHPSessionAcceptor(m_httpsConfig
, this));
284 m_httpsAcceptor
->init(m_httpsServerSocket
.get(), m_worker
.getEventBase());
285 } catch (const std::exception
& ex
) {
286 // Could be some cert thing
287 failedToListen(ex
, m_httpsConfig
.bindAddress
);
292 m_httpServerSocket
->listen(m_httpConfig
.acceptBacklog
);
293 } catch (const std::system_error
& ex
) {
294 failedToListen(ex
, m_httpConfig
.bindAddress
);
297 if (m_httpsServerSocket
) {
299 m_httpsServerSocket
->listen(m_httpsConfig
.acceptBacklog
);
300 } catch (const std::system_error
& ex
) {
301 failedToListen(ex
, m_httpsConfig
.bindAddress
);
304 m_httpServerSocket
->startAccepting();
305 if (m_httpsServerSocket
) {
306 m_httpsServerSocket
->startAccepting();
308 startConsuming(m_worker
.getEventBase(), &m_responseQueue
);
310 setStatus(RunStatus::RUNNING
);
311 folly::AsyncTimeout::attachEventBase(m_worker
.getEventBase());
313 m_dispatcher
.start();
// Blocks the caller until the I/O worker thread exits.  NOTE(review): the
// actual wait call (original line 319) is absent from this capture.
316 void ProxygenServer::waitForEnd() {
317 Logger::Info("%p: Waiting for ProxygenServer port=%d", this, m_port
);
318 // m_worker.wait is always safe to call from any thread at any time.
322 // Server shutdown - Explained
324 // 0. An alarm may be set in http-server.cpp to kill this process after
325 // ServerPreShutdownWait + ServerShutdownListenWait +
326 // ServerGracefulShutdownWait seconds
327 // 1. Set run status to STOPPING. This should fail downstream healthchecks.
328 // If it is the page server, it will continue accepting requests for
329 // ServerPreShutdownWait seconds
330 // 2. Shutdown the listen sockets, this will
331 // 2.a Close any idle connections
332 // 2.b Send SPDY GOAWAY frames
333 // 2.c Insert Connection: close on HTTP/1.1 keep-alive connections as the
334 // response headers are sent
335 // 3. If the server hasn't received the entire request body
336 // ServerShutdownEOM seconds after shutdown starts, the request
337 // will be aborted. ServerShutdownEOM isn't required to be smaller
338 // than ServerShutdownListenWait, but it makes sense to make it be,
339 // in order to make shutdown faster.
340 // 4. After all requests finish executing OR all connections close OR
341 // ServerShutdownListenWait seconds elapse, stop the VM.
342 // Incomplete requests in the I/O thread will not be executed.
343 // Stopping the VM is synchronous and all requests will run to
344 // completion, unless the alarm fires.
345 // 5. Allow responses to drain for up to ServerGracefulShutdownWait seconds.
346 // Note if shutting the VM down took non-zero time it's possible that the
347 // alarm will fire first and kill this process.
// Phase 1 of the shutdown sequence documented above: idempotent (bails if
// not RUNNING or shutdown already started), flips status to STOPPING,
// stops the takeover agent on the event base, and schedules
// stopListening() after ServerPreShutdownWait (page server only).
// NOTE(review): several original lines (352, 354, 356, 360-362, 364, 370,
// 372, 374-375 — closing braces/parens and the delay argument) are absent
// from this capture.
349 void ProxygenServer::stop() {
350 if (getStatus() != RunStatus::RUNNING
||
351 m_shutdownState
!= ShutdownState::SHUTDOWN_NONE
) return;
353 Logger::Info("%p: Stopping ProxygenServer port=%d", this, m_port
);
355 setStatus(RunStatus::STOPPING
);
357 if (m_takeover_agent
) {
358 m_worker
.getEventBase()->runInEventBaseThread([this] {
359 m_takeover_agent
->stop();
363 // close listening sockets, this will initiate draining, including closing
365 m_worker
.getEventBase()->runInEventBaseThread([this] {
366 // Only wait ServerPreShutdownWait seconds for the page server.
367 int delayMilliSeconds
= RuntimeOption::ServerPreShutdownWait
* 1000;
368 if (delayMilliSeconds
< 0 || getPort() != RuntimeOption::ServerPort
) {
369 delayMilliSeconds
= 0;
371 m_worker
.getEventBase()->runAfterDelay([this] { stopListening(); },
373 reportShutdownStatus();
377 void ProxygenServer::stopListening(bool hard
) {
378 m_shutdownState
= ShutdownState::DRAINING_READS
;
379 HttpServer::MarkShutdownStat(ShutdownEvent::SHUTDOWN_DRAIN_READS
);
380 #define SHUT_FBLISTEN 3
382 * Modifications to the Linux kernel to support shutting down a listen
383 * socket for new connections only, but anything which has completed
384 * the TCP handshake will still be accepted. This allows for un-accepted
385 * connections to be queued and then wait until all queued requests are
386 * actively being processed.
389 // triggers acceptStopped/sets acceptor state to Draining
391 m_httpServerSocket
.reset();
392 m_httpsServerSocket
.reset();
394 if (m_httpServerSocket
) {
395 m_httpServerSocket
->stopAccepting(SHUT_FBLISTEN
);
397 if (m_httpsServerSocket
) {
398 m_httpsServerSocket
->stopAccepting(SHUT_FBLISTEN
);
402 if (RuntimeOption::ServerShutdownListenWait
> 0) {
403 std::chrono::seconds
s(RuntimeOption::ServerShutdownListenWait
);
404 VLOG(4) << this << ": scheduling shutdown listen timeout="
405 << s
.count() << " port=" << m_port
;
407 if (RuntimeOption::ServerShutdownEOMWait
> 0) {
408 int delayMilliSeconds
= RuntimeOption::ServerShutdownEOMWait
* 1000;
409 m_worker
.getEventBase()->runAfterDelay(
410 [this] { abortPendingTransports(); }, delayMilliSeconds
);
417 void ProxygenServer::returnPartialPosts() {
418 VLOG(2) << "Running returnPartialPosts for "
419 << m_pendingTransports
.size() << " pending transports";
420 for (auto& transport
: m_pendingTransports
) {
421 if (!transport
.getClientComplete()) {
422 transport
.beginPartialPostEcho();
// Aborts requests whose bodies never fully arrived by the shutdown EOM
// deadline, then accelerates shutdown if the dispatcher is empty.
// NOTE(review): original line 433 (the `do {` opening the abort loop) and
// the tail (442-444, the doShutdown() call and closing braces) are absent
// from this capture.
427 void ProxygenServer::abortPendingTransports() {
428 if (!m_pendingTransports
.empty()) {
429 Logger::Warning("aborting %lu incomplete requests",
430 m_pendingTransports
.size());
431 // Avoid iterating the list, as abort() will unlink(), leaving the
432 // list iterator in a corrupt state.
434 auto& transport
= m_pendingTransports
.front();
435 transport
.abort(); // will unlink()
436 } while (!m_pendingTransports
.empty());
438 // Accelerate shutdown if all requests that were enqueued are done,
439 // since no more is coming in.
440 if (m_enqueuedCount
== 0 &&
441 m_shutdownState
== ShutdownState::DRAINING_READS
) {
// Called when an acceptor reports all of its connections drained; once
// every acceptor is done this cancels the graceful timer and advances the
// shutdown state machine.  NOTE(review): most action lines (originals 447,
// 449-450, 453-455, 457-458, 460-462) are absent from this capture; only
// the logging and comments survive.
446 void ProxygenServer::onConnectionsDrained() {
448 Logger::Info("All connections drained from ProxygenServer drainCount=%d",
451 // both servers have to finish
452 Logger::Verbose("%p: waiting for other server port=%d", this, m_port
);
456 // Stop the graceful shutdown timer
459 // proceed to next shutdown phase
// Shutdown-phase timer fired: log progress (active/queued/enqueued plus
// connection count) and advance the shutdown state machine.
// NOTE(review): original lines 469-471 (the doShutdown() call and closing
// brace) are absent from this capture.
463 void ProxygenServer::timeoutExpired() noexcept
{
464 Logger::Info("%p: shutdown timer expired for ProxygenServer port=%d, "
465 "state=%d, a/q/e %d/%d/%d", this, m_port
, (int)m_shutdownState
,
466 getActiveWorker(), getQueuedJobs(),
467 getLibEventConnectionCount());
468 // proceed to next shutdown phase
// Advances the shutdown state machine by one phase (see the sequence
// documented above stop()).  NOTE(review): the per-case action lines
// (originals 477-478, 482-483, 489, 491-493) are absent from this
// capture; only the case labels and explanatory comments remain.
472 void ProxygenServer::doShutdown() {
473 switch(m_shutdownState
) {
474 case ShutdownState::SHUTDOWN_NONE
:
475 // Transition from SHUTDOWN_NONE to DRAINING_READS needs to happen
476 // explicitly through `stopListening`, not here.
479 case ShutdownState::DRAINING_READS
:
480 // Even though connections may be open for reading, they will not be
481 // executed in the VM
484 case ShutdownState::STOPPING_VM
:
485 // this is a no-op, and can happen when we triggered VM shutdown from
486 // the timeout code path, but the connections drain while waiting for
487 // shutdown. We let the VM shutdown continue and it will advance us
488 // to the next state.
490 case ShutdownState::DRAINING_WRITES
:
496 void ProxygenServer::stopVM() {
497 m_shutdownState
= ShutdownState::STOPPING_VM
;
498 HttpServer::MarkShutdownStat(ShutdownEvent::SHUTDOWN_DRAIN_DISPATCHER
);
499 // we can't call m_dispatcher.stop() from the event loop, because it blocks
500 // all I/O. Spawn a thread to call it and callback when it's done.
501 std::thread
vmStopper([this] {
503 Logger::Info("%p: Stopping dispatcher port=%d", this, m_port
);
505 Logger::Info("%p: Dispatcher stopped port=%d. conns=%d", this, m_port
,
506 getLibEventConnectionCount());
507 m_worker
.getEventBase()->runInEventBaseThread([this] {
514 void ProxygenServer::vmStopped() {
515 m_shutdownState
= ShutdownState::DRAINING_WRITES
;
516 HttpServer::MarkShutdownStat(ShutdownEvent::SHUTDOWN_DRAIN_WRITES
);
517 if (!drained() && RuntimeOption::ServerGracefulShutdownWait
> 0) {
518 m_worker
.getEventBase()->runInEventBaseThread([&] {
519 std::chrono::seconds
s(RuntimeOption::ServerGracefulShutdownWait
);
520 VLOG(4) << this << ": scheduling graceful timeout=" << s
.count() <<
529 void ProxygenServer::forceStop() {
530 Logger::Info("%p: forceStop ProxygenServer port=%d, enqueued=%d, conns=%d",
531 this, m_port
, m_enqueuedCount
, getLibEventConnectionCount());
532 m_httpServerSocket
.reset();
533 m_httpsServerSocket
.reset();
535 // Drops all open connections
536 if (m_httpAcceptor
&& m_httpAcceptor
->getState() < Acceptor::State::kDone
) {
537 m_httpAcceptor
->forceStop();
539 if (m_httpsAcceptor
&& m_httpAcceptor
->getState() < Acceptor::State::kDone
) {
540 m_httpsAcceptor
->forceStop();
543 // No more responses coming from worker threads
545 Logger::Verbose("%p: Stopped response queue consumer port=%d", this, m_port
);
547 // The worker should exit gracefully
548 m_worker
.stopWhenIdle();
549 Logger::Verbose("%p: i/o thread notified to stop port=%d", this, m_port
);
551 // Aaaand we're done - oops not thread safe. Does it matter?
552 setStatus(RunStatus::STOPPED
);
554 HttpServer::MarkShutdownStat(ShutdownEvent::SHUTDOWN_DONE
);
556 for (auto listener
: m_listeners
) {
557 listener
->serverStopped(this);
561 void ProxygenServer::reportShutdownStatus() {
562 if (m_port
!= RuntimeOption::ServerPort
) return;
563 if (getStatus() == RunStatus::STOPPED
) return;
564 Logger::FInfo("Shutdown state={}, a/q/e/p {}/{}/{}/{}, RSS={}Mb",
565 static_cast<int>(m_shutdownState
),
568 getLibEventConnectionCount(),
569 m_pendingTransports
.size(),
570 Process::GetProcessRSS(getpid()));
571 m_worker
.getEventBase()->runAfterDelay([this]{reportShutdownStatus();}, 500);
574 bool ProxygenServer::canAccept() {
575 return (RuntimeOption::ServerConnectionLimit
== 0 ||
576 getLibEventConnectionCount() < RuntimeOption::ServerConnectionLimit
);
// Sum of open connections across the HTTP and (optional) HTTPS acceptors.
// NOTE(review): original lines 583-586 (closing brace and the
// `return conns;`) are absent from this capture.
579 int ProxygenServer::getLibEventConnectionCount() {
580 uint32_t conns
= m_httpAcceptor
->getNumConnections();
581 if (m_httpsAcceptor
) {
582 conns
+= m_httpsAcceptor
->getNumConnections();
587 bool ProxygenServer::initialCertHandler(const std::string
& /*server_name*/,
588 const std::string
& key_file
,
589 const std::string
& cert_file
,
595 wangle::SSLContextConfig sslCtxConfig
;
596 sslCtxConfig
.setCertificate(cert_file
, key_file
, "");
597 sslCtxConfig
.sslVersion
= folly::SSLContext::TLSv1
;
598 sslCtxConfig
.sniNoMatchFn
=
599 std::bind(&ProxygenServer::sniNoMatchHandler
, this,
600 std::placeholders::_1
);
601 sslCtxConfig
.setNextProtocols({
602 std::begin(RuntimeOption::ServerNextProtocols
),
603 std::end(RuntimeOption::ServerNextProtocols
)
605 if (m_httpsConfig
.isSSL()) {
606 m_httpsConfig
.sslContextConfigs
.emplace_back(sslCtxConfig
);
608 if (m_httpConfig
.isSSL()) {
609 m_httpConfig
.sslContextConfigs
.emplace_back(sslCtxConfig
);
612 } catch (const std::exception
&ex
) {
613 Logger::Error(folly::to
<std::string
>(
614 "Invalid certificate file or key file: ", ex
.what()));
619 bool ProxygenServer::dynamicCertHandler(const std::string
& /*server_name*/,
620 const std::string
& key_file
,
621 const std::string
& cert_file
) {
623 wangle::SSLContextConfig sslCtxConfig
;
624 sslCtxConfig
.setCertificate(cert_file
, key_file
, "");
625 sslCtxConfig
.sslVersion
= folly::SSLContext::TLSv1
;
626 sslCtxConfig
.sniNoMatchFn
=
627 std::bind(&ProxygenServer::sniNoMatchHandler
, this,
628 std::placeholders::_1
);
629 sslCtxConfig
.setNextProtocols({
630 std::begin(RuntimeOption::ServerNextProtocols
),
631 std::end(RuntimeOption::ServerNextProtocols
)
633 if (m_httpsAcceptor
) {
634 m_httpsAcceptor
->addSSLContextConfig(sslCtxConfig
);
636 if (m_httpConfig
.isSSL()) {
637 m_httpAcceptor
->addSSLContextConfig(sslCtxConfig
);
640 } catch (const std::exception
&ex
) {
641 Logger::Error("Invalid certificate file or key file: %s", ex
.what());
646 bool ProxygenServer::sniNoMatchHandler(const char *server_name
) {
648 if (!RuntimeOption::SSLCertificateDir
.empty()) {
649 static std::mutex dynLoadMutex
;
650 if (!dynLoadMutex
.try_lock()) return false;
651 SCOPE_EXIT
{ dynLoadMutex
.unlock(); };
654 Logger::Warning("Reloading SSL certificates upon server name %s",
656 ServerNameIndication::load(RuntimeOption::SSLCertificateDir
,
657 std::bind(&ProxygenServer::dynamicCertHandler
,
659 std::placeholders::_1
,
660 std::placeholders::_2
,
661 std::placeholders::_3
));
664 } catch (const std::exception
&ex
) {
665 Logger::Error("Failed to reload certificate files or key files");
670 bool ProxygenServer::enableSSL(int port
) {
674 SocketAddress address
= m_httpConfig
.bindAddress
;
675 address
.setPort(port
);
677 m_httpsConfig
.bindAddress
= address
;
678 m_httpsConfig
.strictSSL
= false;
679 m_httpsConfig
.sslContextConfigs
.emplace_back(createContextConfig());
// Configures the plaintext port to also accept TLS: non-strict SSL with a
// default context, plus insecure connections allowed on the secure server.
// NOTE(review): the return statement and closing brace (original lines
// 688-689) are absent from this capture.
684 bool ProxygenServer::enableSSLWithPlainText() {
685 m_httpConfig
.strictSSL
= false;
686 m_httpConfig
.sslContextConfigs
.emplace_back(createContextConfig());
687 m_httpConfig
.allowInsecureConnectionsOnSecureServer
= true;
691 wangle::SSLContextConfig
ProxygenServer::createContextConfig() {
692 wangle::SSLContextConfig cfg
;
693 if (RuntimeOption::SSLCertificateFile
!= "" &&
694 RuntimeOption::SSLCertificateKeyFile
!= "") {
696 cfg
.setCertificate(RuntimeOption::SSLCertificateFile
,
697 RuntimeOption::SSLCertificateKeyFile
, "");
698 cfg
.sslVersion
= folly::SSLContext::TLSv1
;
699 cfg
.isDefault
= true;
701 std::bind(&ProxygenServer::sniNoMatchHandler
, this,
702 std::placeholders::_1
);
703 cfg
.setNextProtocols({
704 std::begin(RuntimeOption::ServerNextProtocols
),
705 std::end(RuntimeOption::ServerNextProtocols
)
707 } catch (const std::exception
&ex
) {
708 Logger::Error("Invalid certificate file or key file: %s", ex
.what());
711 Logger::Error("Invalid certificate file or key file");
716 void ProxygenServer::onRequest(std::shared_ptr
<ProxygenTransport
> transport
) {
718 Logger::Error("Discarding request while crashing");
719 if (m_shutdownState
== ShutdownState::SHUTDOWN_NONE
) {
720 m_shutdownState
= ShutdownState::DRAINING_READS
;
722 m_httpServerSocket
.reset();
723 m_httpsServerSocket
.reset();
728 if (getStatus() == RunStatus::RUNNING
||
729 (getStatus() == RunStatus::STOPPING
&&
730 m_shutdownState
<= ShutdownState::DRAINING_READS
)) {
731 RequestPriority priority
= getRequestPriority(transport
->getUrl());
732 VLOG(4) << this << ": enqueing request with path=" << transport
->getUrl() <<
733 " and priority=" << priority
;
735 transport
->setEnqueued();
736 m_dispatcher
.enqueue(std::make_shared
<ProxygenJob
>(transport
), priority
);
740 Logger::Error("%p: throwing away one new request while shutting down",
// Called as dispatched jobs complete; when nothing remains enqueued and a
// shutdown timer is pending, accelerate the shutdown state machine.
// NOTE(review): original lines 746 (the decrement itself) and 753-757 are
// absent from this capture.
745 void ProxygenServer::decrementEnqueuedCount() {
747 if (m_enqueuedCount
== 0 && isScheduled()) {
748 // If all requests that got enqueued are done, and no more request
749 // is coming in, accelerate shutdown.
750 if ((m_shutdownState
== ShutdownState::DRAINING_READS
&&
751 m_pendingTransports
.empty()) ||
752 m_shutdownState
== ShutdownState::DRAINING_WRITES
) {
// Maps a request URI to a dispatcher priority: endpoints listed in
// RuntimeOption::ServerHighPriorityEndPoints get PRIORITY_HIGH, all
// others PRIORITY_NORMAL.  NOTE(review): original line 760 (the parameter,
// presumably `const std::string& uri`) and lines 765/767 (closing braces)
// are absent from this capture.
759 ProxygenServer::RequestPriority
ProxygenServer::getRequestPriority(
761 auto const command
= URL::getCommand(URL::getServerObject(uri
));
762 if (RuntimeOption::ServerHighPriorityEndPoints
.find(command
) ==
763 RuntimeOption::ServerHighPriorityEndPoints
.end()) {
764 return PRIORITY_NORMAL
;
766 return PRIORITY_HIGH
;
// Logs a request that errored before execution to the access log, using a
// lazily-created request handler and the transport's monotonic start time.
// NOTE(review): original lines 770, 772-778 and 782-783 are absent from
// this capture — including the guards around handler creation and the
// declaration of `start`.
769 void ProxygenServer::onRequestError(Transport
* transport
) {
771 m_handler
= createRequestHandler();
779 Timer::GetMonotonicTime(start
);
780 transport
->onRequestStart(start
);
781 m_handler
->logToAccessLog(transport
);
785 ///////////////////////////////////////////////////////////////////////////////