Source: [chromium-blink-merge.git] / net / socket / client_socket_pool_base.cc
Blob: cec7956a0ee26a6a5c7e80e4ddf72822aeb1cc8f
(Commit subject shown by the web viewer: "Resize destination bus to the actual number of decoded frames." — a media/ change unrelated to this file's contents.)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/socket/client_socket_pool_base.h"
7 #include "base/compiler_specific.h"
8 #include "base/format_macros.h"
9 #include "base/logging.h"
10 #include "base/message_loop/message_loop.h"
11 #include "base/metrics/stats_counters.h"
12 #include "base/stl_util.h"
13 #include "base/strings/string_util.h"
14 #include "base/time/time.h"
15 #include "base/values.h"
16 #include "net/base/net_errors.h"
17 #include "net/base/net_log.h"
18 #include "net/socket/client_socket_handle.h"
20 using base::TimeDelta;
22 namespace net {
24 namespace {
26 // Indicate whether we should enable idle socket cleanup timer. When timer is
27 // disabled, sockets are closed next time a socket request is made.
28 bool g_cleanup_timer_enabled = true;
30 // The timeout value, in seconds, used to clean up idle sockets that can't be
31 // reused.
33 // Note: It's important to close idle sockets that have received data as soon
34 // as possible because the received data may cause BSOD on Windows XP under
35 // some conditions. See http://crbug.com/4606.
36 const int kCleanupInterval = 10; // DO NOT INCREASE THIS TIMEOUT.
38 // Indicate whether or not we should establish a new transport layer connection
39 // after a certain timeout has passed without receiving an ACK.
40 bool g_connect_backup_jobs_enabled = true;
42 // Compares the effective priority of two results, and returns 1 if |request1|
43 // has greater effective priority than |request2|, 0 if they have the same
44 // effective priority, and -1 if |request2| has the greater effective priority.
45 // Requests with |ignore_limits| set have higher effective priority than those
46 // without. If both requests have |ignore_limits| set/unset, then the request
47 // with the highest Pririoty has the highest effective priority. Does not take
48 // into account the fact that Requests are serviced in FIFO order if they would
49 // otherwise have the same priority.
50 int CompareEffectiveRequestPriority(
51 const internal::ClientSocketPoolBaseHelper::Request& request1,
52 const internal::ClientSocketPoolBaseHelper::Request& request2) {
53 if (request1.ignore_limits() && !request2.ignore_limits())
54 return 1;
55 if (!request1.ignore_limits() && request2.ignore_limits())
56 return -1;
57 if (request1.priority() > request2.priority())
58 return 1;
59 if (request1.priority() < request2.priority())
60 return -1;
61 return 0;
64 } // namespace
66 ConnectJob::ConnectJob(const std::string& group_name,
67 base::TimeDelta timeout_duration,
68 RequestPriority priority,
69 Delegate* delegate,
70 const BoundNetLog& net_log)
71 : group_name_(group_name),
72 timeout_duration_(timeout_duration),
73 priority_(priority),
74 delegate_(delegate),
75 net_log_(net_log),
76 idle_(true) {
77 DCHECK(!group_name.empty());
78 DCHECK(delegate);
79 net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB,
80 NetLog::StringCallback("group_name", &group_name_));
83 ConnectJob::~ConnectJob() {
84 net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB);
87 scoped_ptr<StreamSocket> ConnectJob::PassSocket() {
88 return socket_.Pass();
91 int ConnectJob::Connect() {
92 if (timeout_duration_ != base::TimeDelta())
93 timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout);
95 idle_ = false;
97 LogConnectStart();
99 int rv = ConnectInternal();
101 if (rv != ERR_IO_PENDING) {
102 LogConnectCompletion(rv);
103 delegate_ = NULL;
106 return rv;
109 void ConnectJob::SetSocket(scoped_ptr<StreamSocket> socket) {
110 if (socket) {
111 net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET,
112 socket->NetLog().source().ToEventParametersCallback());
114 socket_ = socket.Pass();
117 void ConnectJob::NotifyDelegateOfCompletion(int rv) {
118 // The delegate will own |this|.
119 Delegate* delegate = delegate_;
120 delegate_ = NULL;
122 LogConnectCompletion(rv);
123 delegate->OnConnectJobComplete(rv, this);
126 void ConnectJob::ResetTimer(base::TimeDelta remaining_time) {
127 timer_.Stop();
128 timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout);
131 void ConnectJob::LogConnectStart() {
132 connect_timing_.connect_start = base::TimeTicks::Now();
133 net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT);
136 void ConnectJob::LogConnectCompletion(int net_error) {
137 connect_timing_.connect_end = base::TimeTicks::Now();
138 net_log().EndEventWithNetErrorCode(
139 NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error);
142 void ConnectJob::OnTimeout() {
143 // Make sure the socket is NULL before calling into |delegate|.
144 SetSocket(scoped_ptr<StreamSocket>());
146 net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT);
148 NotifyDelegateOfCompletion(ERR_TIMED_OUT);
151 namespace internal {
153 ClientSocketPoolBaseHelper::Request::Request(
154 ClientSocketHandle* handle,
155 const CompletionCallback& callback,
156 RequestPriority priority,
157 bool ignore_limits,
158 Flags flags,
159 const BoundNetLog& net_log)
160 : handle_(handle),
161 callback_(callback),
162 priority_(priority),
163 ignore_limits_(ignore_limits),
164 flags_(flags),
165 net_log_(net_log) {}
167 ClientSocketPoolBaseHelper::Request::~Request() {}
169 ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
170 HigherLayeredPool* pool,
171 int max_sockets,
172 int max_sockets_per_group,
173 base::TimeDelta unused_idle_socket_timeout,
174 base::TimeDelta used_idle_socket_timeout,
175 ConnectJobFactory* connect_job_factory)
176 : idle_socket_count_(0),
177 connecting_socket_count_(0),
178 handed_out_socket_count_(0),
179 max_sockets_(max_sockets),
180 max_sockets_per_group_(max_sockets_per_group),
181 use_cleanup_timer_(g_cleanup_timer_enabled),
182 unused_idle_socket_timeout_(unused_idle_socket_timeout),
183 used_idle_socket_timeout_(used_idle_socket_timeout),
184 connect_job_factory_(connect_job_factory),
185 connect_backup_jobs_enabled_(false),
186 pool_generation_number_(0),
187 pool_(pool),
188 weak_factory_(this) {
189 DCHECK_LE(0, max_sockets_per_group);
190 DCHECK_LE(max_sockets_per_group, max_sockets);
192 NetworkChangeNotifier::AddIPAddressObserver(this);
195 ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
196 // Clean up any idle sockets and pending connect jobs. Assert that we have no
197 // remaining active sockets or pending requests. They should have all been
198 // cleaned up prior to |this| being destroyed.
199 FlushWithError(ERR_ABORTED);
200 DCHECK(group_map_.empty());
201 DCHECK(pending_callback_map_.empty());
202 DCHECK_EQ(0, connecting_socket_count_);
203 CHECK(higher_pools_.empty());
205 NetworkChangeNotifier::RemoveIPAddressObserver(this);
207 // Remove from lower layer pools.
208 for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
209 it != lower_pools_.end();
210 ++it) {
211 (*it)->RemoveHigherLayeredPool(pool_);
215 ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair()
216 : result(OK) {
219 ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(
220 const CompletionCallback& callback_in, int result_in)
221 : callback(callback_in),
222 result(result_in) {
225 ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}
227 bool ClientSocketPoolBaseHelper::IsStalled() const {
228 // If a lower layer pool is stalled, consider |this| stalled as well.
229 for (std::set<LowerLayeredPool*>::const_iterator it = lower_pools_.begin();
230 it != lower_pools_.end();
231 ++it) {
232 if ((*it)->IsStalled())
233 return true;
236 // If fewer than |max_sockets_| are in use, then clearly |this| is not
237 // stalled.
238 if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
239 return false;
240 // So in order to be stalled, |this| must be using at least |max_sockets_| AND
241 // |this| must have a request that is actually stalled on the global socket
242 // limit. To find such a request, look for a group that has more requests
243 // than jobs AND where the number of sockets is less than
244 // |max_sockets_per_group_|. (If the number of sockets is equal to
245 // |max_sockets_per_group_|, then the request is stalled on the group limit,
246 // which does not count.)
247 for (GroupMap::const_iterator it = group_map_.begin();
248 it != group_map_.end(); ++it) {
249 if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
250 return true;
252 return false;
255 void ClientSocketPoolBaseHelper::AddLowerLayeredPool(
256 LowerLayeredPool* lower_pool) {
257 DCHECK(pool_);
258 CHECK(!ContainsKey(lower_pools_, lower_pool));
259 lower_pools_.insert(lower_pool);
260 lower_pool->AddHigherLayeredPool(pool_);
263 void ClientSocketPoolBaseHelper::AddHigherLayeredPool(
264 HigherLayeredPool* higher_pool) {
265 CHECK(higher_pool);
266 CHECK(!ContainsKey(higher_pools_, higher_pool));
267 higher_pools_.insert(higher_pool);
270 void ClientSocketPoolBaseHelper::RemoveHigherLayeredPool(
271 HigherLayeredPool* higher_pool) {
272 CHECK(higher_pool);
273 CHECK(ContainsKey(higher_pools_, higher_pool));
274 higher_pools_.erase(higher_pool);
277 int ClientSocketPoolBaseHelper::RequestSocket(
278 const std::string& group_name,
279 scoped_ptr<const Request> request) {
280 CHECK(!request->callback().is_null());
281 CHECK(request->handle());
283 // Cleanup any timed-out idle sockets if no timer is used.
284 if (!use_cleanup_timer_)
285 CleanupIdleSockets(false);
287 request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL);
288 Group* group = GetOrCreateGroup(group_name);
290 int rv = RequestSocketInternal(group_name, *request);
291 if (rv != ERR_IO_PENDING) {
292 request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
293 CHECK(!request->handle()->is_initialized());
294 request.reset();
295 } else {
296 group->InsertPendingRequest(request.Pass());
297 // Have to do this asynchronously, as closing sockets in higher level pools
298 // call back in to |this|, which will cause all sorts of fun and exciting
299 // re-entrancy issues if the socket pool is doing something else at the
300 // time.
301 if (group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
302 base::MessageLoop::current()->PostTask(
303 FROM_HERE,
304 base::Bind(
305 &ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools,
306 weak_factory_.GetWeakPtr()));
309 return rv;
312 void ClientSocketPoolBaseHelper::RequestSockets(
313 const std::string& group_name,
314 const Request& request,
315 int num_sockets) {
316 DCHECK(request.callback().is_null());
317 DCHECK(!request.handle());
319 // Cleanup any timed out idle sockets if no timer is used.
320 if (!use_cleanup_timer_)
321 CleanupIdleSockets(false);
323 if (num_sockets > max_sockets_per_group_) {
324 num_sockets = max_sockets_per_group_;
327 request.net_log().BeginEvent(
328 NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS,
329 NetLog::IntegerCallback("num_sockets", num_sockets));
331 Group* group = GetOrCreateGroup(group_name);
333 // RequestSocketsInternal() may delete the group.
334 bool deleted_group = false;
336 int rv = OK;
337 for (int num_iterations_left = num_sockets;
338 group->NumActiveSocketSlots() < num_sockets &&
339 num_iterations_left > 0 ; num_iterations_left--) {
340 rv = RequestSocketInternal(group_name, request);
341 if (rv < 0 && rv != ERR_IO_PENDING) {
342 // We're encountering a synchronous error. Give up.
343 if (!ContainsKey(group_map_, group_name))
344 deleted_group = true;
345 break;
347 if (!ContainsKey(group_map_, group_name)) {
348 // Unexpected. The group should only be getting deleted on synchronous
349 // error.
350 NOTREACHED();
351 deleted_group = true;
352 break;
356 if (!deleted_group && group->IsEmpty())
357 RemoveGroup(group_name);
359 if (rv == ERR_IO_PENDING)
360 rv = OK;
361 request.net_log().EndEventWithNetErrorCode(
362 NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv);
365 int ClientSocketPoolBaseHelper::RequestSocketInternal(
366 const std::string& group_name,
367 const Request& request) {
368 ClientSocketHandle* const handle = request.handle();
369 const bool preconnecting = !handle;
370 Group* group = GetOrCreateGroup(group_name);
372 if (!(request.flags() & NO_IDLE_SOCKETS)) {
373 // Try to reuse a socket.
374 if (AssignIdleSocketToRequest(request, group))
375 return OK;
378 // If there are more ConnectJobs than pending requests, don't need to do
379 // anything. Can just wait for the extra job to connect, and then assign it
380 // to the request.
381 if (!preconnecting && group->TryToUseUnassignedConnectJob())
382 return ERR_IO_PENDING;
384 // Can we make another active socket now?
385 if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
386 !request.ignore_limits()) {
387 // TODO(willchan): Consider whether or not we need to close a socket in a
388 // higher layered group. I don't think this makes sense since we would just
389 // reuse that socket then if we needed one and wouldn't make it down to this
390 // layer.
391 request.net_log().AddEvent(
392 NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
393 return ERR_IO_PENDING;
396 if (ReachedMaxSocketsLimit() && !request.ignore_limits()) {
397 // NOTE(mmenke): Wonder if we really need different code for each case
398 // here. Only reason for them now seems to be preconnects.
399 if (idle_socket_count() > 0) {
400 // There's an idle socket in this pool. Either that's because there's
401 // still one in this group, but we got here due to preconnecting bypassing
402 // idle sockets, or because there's an idle socket in another group.
403 bool closed = CloseOneIdleSocketExceptInGroup(group);
404 if (preconnecting && !closed)
405 return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
406 } else {
407 // We could check if we really have a stalled group here, but it requires
408 // a scan of all groups, so just flip a flag here, and do the check later.
409 request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
410 return ERR_IO_PENDING;
414 // We couldn't find a socket to reuse, and there's space to allocate one,
415 // so allocate and connect a new one.
416 scoped_ptr<ConnectJob> connect_job(
417 connect_job_factory_->NewConnectJob(group_name, request, this));
419 int rv = connect_job->Connect();
420 if (rv == OK) {
421 LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
422 if (!preconnecting) {
423 HandOutSocket(connect_job->PassSocket(), false /* not reused */,
424 connect_job->connect_timing(), handle, base::TimeDelta(),
425 group, request.net_log());
426 } else {
427 AddIdleSocket(connect_job->PassSocket(), group);
429 } else if (rv == ERR_IO_PENDING) {
430 // If we don't have any sockets in this group, set a timer for potentially
431 // creating a new one. If the SYN is lost, this backup socket may complete
432 // before the slow socket, improving end user latency.
433 if (connect_backup_jobs_enabled_ &&
434 group->IsEmpty() && !group->HasBackupJob()) {
435 group->StartBackupSocketTimer(group_name, this);
438 connecting_socket_count_++;
440 group->AddJob(connect_job.Pass(), preconnecting);
441 } else {
442 LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
443 scoped_ptr<StreamSocket> error_socket;
444 if (!preconnecting) {
445 DCHECK(handle);
446 connect_job->GetAdditionalErrorState(handle);
447 error_socket = connect_job->PassSocket();
449 if (error_socket) {
450 HandOutSocket(error_socket.Pass(), false /* not reused */,
451 connect_job->connect_timing(), handle, base::TimeDelta(),
452 group, request.net_log());
453 } else if (group->IsEmpty()) {
454 RemoveGroup(group_name);
458 return rv;
461 bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest(
462 const Request& request, Group* group) {
463 std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
464 std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end();
466 // Iterate through the idle sockets forwards (oldest to newest)
467 // * Delete any disconnected ones.
468 // * If we find a used idle socket, assign to |idle_socket|. At the end,
469 // the |idle_socket_it| will be set to the newest used idle socket.
470 for (std::list<IdleSocket>::iterator it = idle_sockets->begin();
471 it != idle_sockets->end();) {
472 if (!it->socket->IsConnectedAndIdle()) {
473 DecrementIdleCount();
474 delete it->socket;
475 it = idle_sockets->erase(it);
476 continue;
479 if (it->socket->WasEverUsed()) {
480 // We found one we can reuse!
481 idle_socket_it = it;
484 ++it;
487 // If we haven't found an idle socket, that means there are no used idle
488 // sockets. Pick the oldest (first) idle socket (FIFO).
490 if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
491 idle_socket_it = idle_sockets->begin();
493 if (idle_socket_it != idle_sockets->end()) {
494 DecrementIdleCount();
495 base::TimeDelta idle_time =
496 base::TimeTicks::Now() - idle_socket_it->start_time;
497 IdleSocket idle_socket = *idle_socket_it;
498 idle_sockets->erase(idle_socket_it);
499 HandOutSocket(
500 scoped_ptr<StreamSocket>(idle_socket.socket),
501 idle_socket.socket->WasEverUsed(),
502 LoadTimingInfo::ConnectTiming(),
503 request.handle(),
504 idle_time,
505 group,
506 request.net_log());
507 return true;
510 return false;
513 // static
514 void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
515 const NetLog::Source& connect_job_source, const Request& request) {
516 request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
517 connect_job_source.ToEventParametersCallback());
520 void ClientSocketPoolBaseHelper::CancelRequest(
521 const std::string& group_name, ClientSocketHandle* handle) {
522 PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle);
523 if (callback_it != pending_callback_map_.end()) {
524 int result = callback_it->second.result;
525 pending_callback_map_.erase(callback_it);
526 scoped_ptr<StreamSocket> socket = handle->PassSocket();
527 if (socket) {
528 if (result != OK)
529 socket->Disconnect();
530 ReleaseSocket(handle->group_name(), socket.Pass(), handle->id());
532 return;
535 CHECK(ContainsKey(group_map_, group_name));
537 Group* group = GetOrCreateGroup(group_name);
539 // Search pending_requests for matching handle.
540 scoped_ptr<const Request> request =
541 group->FindAndRemovePendingRequest(handle);
542 if (request) {
543 request->net_log().AddEvent(NetLog::TYPE_CANCELLED);
544 request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
546 // We let the job run, unless we're at the socket limit and there is
547 // not another request waiting on the job.
548 if (group->jobs().size() > group->pending_request_count() &&
549 ReachedMaxSocketsLimit()) {
550 RemoveConnectJob(*group->jobs().begin(), group);
551 CheckForStalledSocketGroups();
556 bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {
557 return ContainsKey(group_map_, group_name);
560 void ClientSocketPoolBaseHelper::CloseIdleSockets() {
561 CleanupIdleSockets(true);
562 DCHECK_EQ(0, idle_socket_count_);
565 int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
566 const std::string& group_name) const {
567 GroupMap::const_iterator i = group_map_.find(group_name);
568 CHECK(i != group_map_.end());
570 return i->second->idle_sockets().size();
573 LoadState ClientSocketPoolBaseHelper::GetLoadState(
574 const std::string& group_name,
575 const ClientSocketHandle* handle) const {
576 if (ContainsKey(pending_callback_map_, handle))
577 return LOAD_STATE_CONNECTING;
579 if (!ContainsKey(group_map_, group_name)) {
580 NOTREACHED() << "ClientSocketPool does not contain group: " << group_name
581 << " for handle: " << handle;
582 return LOAD_STATE_IDLE;
585 // Can't use operator[] since it is non-const.
586 const Group& group = *group_map_.find(group_name)->second;
588 if (group.HasConnectJobForHandle(handle)) {
589 // Just return the state of the farthest along ConnectJob for the first
590 // group.jobs().size() pending requests.
591 LoadState max_state = LOAD_STATE_IDLE;
592 for (ConnectJobSet::const_iterator job_it = group.jobs().begin();
593 job_it != group.jobs().end(); ++job_it) {
594 max_state = std::max(max_state, (*job_it)->GetLoadState());
596 return max_state;
599 if (group.IsStalledOnPoolMaxSockets(max_sockets_per_group_))
600 return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL;
601 return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET;
604 base::DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
605 const std::string& name, const std::string& type) const {
606 base::DictionaryValue* dict = new base::DictionaryValue();
607 dict->SetString("name", name);
608 dict->SetString("type", type);
609 dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
610 dict->SetInteger("connecting_socket_count", connecting_socket_count_);
611 dict->SetInteger("idle_socket_count", idle_socket_count_);
612 dict->SetInteger("max_socket_count", max_sockets_);
613 dict->SetInteger("max_sockets_per_group", max_sockets_per_group_);
614 dict->SetInteger("pool_generation_number", pool_generation_number_);
616 if (group_map_.empty())
617 return dict;
619 base::DictionaryValue* all_groups_dict = new base::DictionaryValue();
620 for (GroupMap::const_iterator it = group_map_.begin();
621 it != group_map_.end(); it++) {
622 const Group* group = it->second;
623 base::DictionaryValue* group_dict = new base::DictionaryValue();
625 group_dict->SetInteger("pending_request_count",
626 group->pending_request_count());
627 if (group->has_pending_requests()) {
628 group_dict->SetInteger("top_pending_priority",
629 group->TopPendingPriority());
632 group_dict->SetInteger("active_socket_count", group->active_socket_count());
634 base::ListValue* idle_socket_list = new base::ListValue();
635 std::list<IdleSocket>::const_iterator idle_socket;
636 for (idle_socket = group->idle_sockets().begin();
637 idle_socket != group->idle_sockets().end();
638 idle_socket++) {
639 int source_id = idle_socket->socket->NetLog().source().id;
640 idle_socket_list->Append(new base::FundamentalValue(source_id));
642 group_dict->Set("idle_sockets", idle_socket_list);
644 base::ListValue* connect_jobs_list = new base::ListValue();
645 std::set<ConnectJob*>::const_iterator job = group->jobs().begin();
646 for (job = group->jobs().begin(); job != group->jobs().end(); job++) {
647 int source_id = (*job)->net_log().source().id;
648 connect_jobs_list->Append(new base::FundamentalValue(source_id));
650 group_dict->Set("connect_jobs", connect_jobs_list);
652 group_dict->SetBoolean("is_stalled",
653 group->IsStalledOnPoolMaxSockets(
654 max_sockets_per_group_));
655 group_dict->SetBoolean("has_backup_job", group->HasBackupJob());
657 all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
659 dict->Set("groups", all_groups_dict);
660 return dict;
663 bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup(
664 base::TimeTicks now,
665 base::TimeDelta timeout) const {
666 bool timed_out = (now - start_time) >= timeout;
667 if (timed_out)
668 return true;
669 if (socket->WasEverUsed())
670 return !socket->IsConnectedAndIdle();
671 return !socket->IsConnected();
674 void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) {
675 if (idle_socket_count_ == 0)
676 return;
678 // Current time value. Retrieving it once at the function start rather than
679 // inside the inner loop, since it shouldn't change by any meaningful amount.
680 base::TimeTicks now = base::TimeTicks::Now();
682 GroupMap::iterator i = group_map_.begin();
683 while (i != group_map_.end()) {
684 Group* group = i->second;
686 std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin();
687 while (j != group->idle_sockets().end()) {
688 base::TimeDelta timeout =
689 j->socket->WasEverUsed() ?
690 used_idle_socket_timeout_ : unused_idle_socket_timeout_;
691 if (force || j->ShouldCleanup(now, timeout)) {
692 delete j->socket;
693 j = group->mutable_idle_sockets()->erase(j);
694 DecrementIdleCount();
695 } else {
696 ++j;
700 // Delete group if no longer needed.
701 if (group->IsEmpty()) {
702 RemoveGroup(i++);
703 } else {
704 ++i;
709 ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup(
710 const std::string& group_name) {
711 GroupMap::iterator it = group_map_.find(group_name);
712 if (it != group_map_.end())
713 return it->second;
714 Group* group = new Group;
715 group_map_[group_name] = group;
716 return group;
719 void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) {
720 GroupMap::iterator it = group_map_.find(group_name);
721 CHECK(it != group_map_.end());
723 RemoveGroup(it);
726 void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) {
727 delete it->second;
728 group_map_.erase(it);
731 // static
732 bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() {
733 return g_connect_backup_jobs_enabled;
736 // static
737 bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) {
738 bool old_value = g_connect_backup_jobs_enabled;
739 g_connect_backup_jobs_enabled = enabled;
740 return old_value;
743 void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
744 connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled;
747 void ClientSocketPoolBaseHelper::IncrementIdleCount() {
748 if (++idle_socket_count_ == 1 && use_cleanup_timer_)
749 StartIdleSocketTimer();
752 void ClientSocketPoolBaseHelper::DecrementIdleCount() {
753 if (--idle_socket_count_ == 0)
754 timer_.Stop();
757 // static
758 bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
759 return g_cleanup_timer_enabled;
762 // static
763 bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
764 bool old_value = g_cleanup_timer_enabled;
765 g_cleanup_timer_enabled = enabled;
766 return old_value;
769 void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
770 timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this,
771 &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
774 void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
775 scoped_ptr<StreamSocket> socket,
776 int id) {
777 GroupMap::iterator i = group_map_.find(group_name);
778 CHECK(i != group_map_.end());
780 Group* group = i->second;
782 CHECK_GT(handed_out_socket_count_, 0);
783 handed_out_socket_count_--;
785 CHECK_GT(group->active_socket_count(), 0);
786 group->DecrementActiveSocketCount();
788 const bool can_reuse = socket->IsConnectedAndIdle() &&
789 id == pool_generation_number_;
790 if (can_reuse) {
791 // Add it to the idle list.
792 AddIdleSocket(socket.Pass(), group);
793 OnAvailableSocketSlot(group_name, group);
794 } else {
795 socket.reset();
798 CheckForStalledSocketGroups();
801 void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
802 // If we have idle sockets, see if we can give one to the top-stalled group.
803 std::string top_group_name;
804 Group* top_group = NULL;
805 if (!FindTopStalledGroup(&top_group, &top_group_name)) {
806 // There may still be a stalled group in a lower level pool.
807 for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin();
808 it != lower_pools_.end();
809 ++it) {
810 if ((*it)->IsStalled()) {
811 CloseOneIdleSocket();
812 break;
815 return;
818 if (ReachedMaxSocketsLimit()) {
819 if (idle_socket_count() > 0) {
820 CloseOneIdleSocket();
821 } else {
822 // We can't activate more sockets since we're already at our global
823 // limit.
824 return;
828 // Note: we don't loop on waking stalled groups. If the stalled group is at
829 // its limit, may be left with other stalled groups that could be
830 // woken. This isn't optimal, but there is no starvation, so to avoid
831 // the looping we leave it at this.
832 OnAvailableSocketSlot(top_group_name, top_group);
835 // Search for the highest priority pending request, amongst the groups that
836 // are not at the |max_sockets_per_group_| limit. Note: for requests with
837 // the same priority, the winner is based on group hash ordering (and not
838 // insertion order).
839 bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
840 Group** group,
841 std::string* group_name) const {
842 CHECK((group && group_name) || (!group && !group_name));
843 Group* top_group = NULL;
844 const std::string* top_group_name = NULL;
845 bool has_stalled_group = false;
846 for (GroupMap::const_iterator i = group_map_.begin();
847 i != group_map_.end(); ++i) {
848 Group* curr_group = i->second;
849 if (!curr_group->has_pending_requests())
850 continue;
851 if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
852 if (!group)
853 return true;
854 has_stalled_group = true;
855 bool has_higher_priority = !top_group ||
856 curr_group->TopPendingPriority() > top_group->TopPendingPriority();
857 if (has_higher_priority) {
858 top_group = curr_group;
859 top_group_name = &i->first;
864 if (top_group) {
865 CHECK(group);
866 *group = top_group;
867 *group_name = *top_group_name;
868 } else {
869 CHECK(!has_stalled_group);
871 return has_stalled_group;
874 void ClientSocketPoolBaseHelper::OnConnectJobComplete(
875 int result, ConnectJob* job) {
876 DCHECK_NE(ERR_IO_PENDING, result);
877 const std::string group_name = job->group_name();
878 GroupMap::iterator group_it = group_map_.find(group_name);
879 CHECK(group_it != group_map_.end());
880 Group* group = group_it->second;
882 scoped_ptr<StreamSocket> socket = job->PassSocket();
884 // Copies of these are needed because |job| may be deleted before they are
885 // accessed.
886 BoundNetLog job_log = job->net_log();
887 LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing();
889 // RemoveConnectJob(job, _) must be called by all branches below;
890 // otherwise, |job| will be leaked.
892 if (result == OK) {
893 DCHECK(socket.get());
894 RemoveConnectJob(job, group);
895 scoped_ptr<const Request> request = group->PopNextPendingRequest();
896 if (request) {
897 LogBoundConnectJobToRequest(job_log.source(), *request);
898 HandOutSocket(
899 socket.Pass(), false /* unused socket */, connect_timing,
900 request->handle(), base::TimeDelta(), group, request->net_log());
901 request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);
902 InvokeUserCallbackLater(request->handle(), request->callback(), result);
903 } else {
904 AddIdleSocket(socket.Pass(), group);
905 OnAvailableSocketSlot(group_name, group);
906 CheckForStalledSocketGroups();
908 } else {
909 // If we got a socket, it must contain error information so pass that
910 // up so that the caller can retrieve it.
911 bool handed_out_socket = false;
912 scoped_ptr<const Request> request = group->PopNextPendingRequest();
913 if (request) {
914 LogBoundConnectJobToRequest(job_log.source(), *request);
915 job->GetAdditionalErrorState(request->handle());
916 RemoveConnectJob(job, group);
917 if (socket.get()) {
918 handed_out_socket = true;
919 HandOutSocket(socket.Pass(), false /* unused socket */,
920 connect_timing, request->handle(), base::TimeDelta(),
921 group, request->net_log());
923 request->net_log().EndEventWithNetErrorCode(
924 NetLog::TYPE_SOCKET_POOL, result);
925 InvokeUserCallbackLater(request->handle(), request->callback(), result);
926 } else {
927 RemoveConnectJob(job, group);
929 if (!handed_out_socket) {
930 OnAvailableSocketSlot(group_name, group);
931 CheckForStalledSocketGroups();
936 void ClientSocketPoolBaseHelper::OnIPAddressChanged() {
937 FlushWithError(ERR_NETWORK_CHANGED);
940 void ClientSocketPoolBaseHelper::FlushWithError(int error) {
941 pool_generation_number_++;
942 CancelAllConnectJobs();
943 CloseIdleSockets();
944 CancelAllRequestsWithError(error);
// Removes (and deletes, via Group::RemoveJob) |job| from |group|, keeping
// the pool-wide count of in-progress connect jobs in sync.
void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                  Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  group->RemoveJob(job);

  // If we've got no more jobs for this group, then we no longer need a
  // backup job either.
  if (group->jobs().empty())
    group->CleanupBackupJob();
}
// Called whenever a socket slot in |group| frees up. Either the group is now
// completely unused (and is discarded) or a queued request can be serviced.
void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name, Group* group) {
  DCHECK(ContainsKey(group_map_, group_name));
  if (group->IsEmpty()) {
    RemoveGroup(group_name);
  } else if (group->has_pending_requests()) {
    ProcessPendingRequest(group_name, group);
  }
}
// Attempts to satisfy the highest-priority pending request of |group|.
// If the attempt completes synchronously (success or hard failure), the
// request is dequeued and its callback is posted; on ERR_IO_PENDING the
// request stays queued and will be completed by the connect job later.
void ClientSocketPoolBaseHelper::ProcessPendingRequest(
    const std::string& group_name, Group* group) {
  const Request* next_request = group->GetNextPendingRequest();
  DCHECK(next_request);
  int rv = RequestSocketInternal(group_name, *next_request);
  if (rv != ERR_IO_PENDING) {
    // Pop first; the group may become empty and be removed below, but the
    // popped request is owned by this scope and remains valid.
    scoped_ptr<const Request> request = group->PopNextPendingRequest();
    DCHECK(request);
    if (group->IsEmpty())
      RemoveGroup(group_name);

    request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
  }
}
// Transfers ownership of |socket| into |handle| and updates the pool's
// bookkeeping and |net_log|. |reused| is true when the socket came off the
// idle list; |idle_time| is how long it sat idle (zero for fresh sockets).
void ClientSocketPoolBaseHelper::HandOutSocket(
    scoped_ptr<StreamSocket> socket,
    bool reused,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->SetSocket(socket.Pass());
  handle->set_is_reused(reused);
  handle->set_idle_time(idle_time);
  // Stamp the handle with the current generation; FlushWithError() bumps
  // this number, presumably so stale sockets can be recognized when the
  // handle is returned — confirm against ReleaseSocket().
  handle->set_pool_id(pool_generation_number_);
  handle->set_connect_timing(connect_timing);

  if (reused) {
    net_log.AddEvent(
        NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET,
        NetLog::IntegerCallback(
            "idle_ms", static_cast<int>(idle_time.InMilliseconds())));
  }

  net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source().ToEventParametersCallback());

  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}
// Places |socket| on |group|'s idle list, stamped with the current time so
// idle-timeout cleanup can tell how long it has been unused.
void ClientSocketPoolBaseHelper::AddIdleSocket(
    scoped_ptr<StreamSocket> socket,
    Group* group) {
  DCHECK(socket);
  IdleSocket idle_socket;
  // The idle list holds raw owning pointers; ownership leaves the scoped_ptr
  // here and is reclaimed when the idle socket is closed or handed out.
  idle_socket.socket = socket.release();
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(idle_socket);
  IncrementIdleCount();
}
// Aborts and deletes every in-flight ConnectJob in every group. Groups that
// end up completely unused are removed while iterating.
void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllJobs();

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
  DCHECK_EQ(0, connecting_socket_count_);
}
// Fails every queued request in every group with |error|; each request's
// callback is posted asynchronously via InvokeUserCallbackLater(). Groups
// that end up completely unused are removed while iterating.
void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;

    // Drain this group's request queue.
    while (true) {
      scoped_ptr<const Request> request = group->PopNextPendingRequest();
      if (!request)
        break;
      InvokeUserCallbackLater(request->handle(), request->callback(), error);
    }

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      // RemoveGroup() will call .erase() which will invalidate the iterator,
      // but i will already have been incremented to a valid iterator before
      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
    }
  }
}
1071 bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
1072 // Each connecting socket will eventually connect and be handed out.
1073 int total = handed_out_socket_count_ + connecting_socket_count_ +
1074 idle_socket_count();
1075 // There can be more sockets than the limit since some requests can ignore
1076 // the limit
1077 if (total < max_sockets_)
1078 return false;
1079 return true;
1082 bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
1083 if (idle_socket_count() == 0)
1084 return false;
1085 return CloseOneIdleSocketExceptInGroup(NULL);
// Closes the oldest idle socket of the first group found holding one,
// skipping |exception_group| (pass NULL to allow all groups). Returns true
// if a socket was closed. May only be called when the pool has at least one
// idle socket.
bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
    const Group* exception_group) {
  CHECK_GT(idle_socket_count(), 0);

  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group* group = i->second;
    if (exception_group == group)
      continue;
    std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();

    if (!idle_sockets->empty()) {
      // The front entry has been idle longest (AddIdleSocket() appends).
      delete idle_sockets->front().socket;
      idle_sockets->pop_front();
      DecrementIdleCount();
      if (group->IsEmpty())
        RemoveGroup(i);
      // Returning immediately — |i| may have been invalidated by RemoveGroup.
      return true;
    }
  }

  return false;
}
1112 bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInHigherLayeredPool() {
1113 // This pool doesn't have any idle sockets. It's possible that a pool at a
1114 // higher layer is holding one of this sockets active, but it's actually idle.
1115 // Query the higher layers.
1116 for (std::set<HigherLayeredPool*>::const_iterator it = higher_pools_.begin();
1117 it != higher_pools_.end(); ++it) {
1118 if ((*it)->CloseOneIdleConnection())
1119 return true;
1121 return false;
// Queues |callback| to be run with result |rv| on a later message-loop
// iteration. The entry is keyed by |handle| so a cancellation can remove it
// before it fires; the CHECK forbids two outstanding callbacks per handle.
void ClientSocketPoolBaseHelper::InvokeUserCallbackLater(
    ClientSocketHandle* handle, const CompletionCallback& callback, int rv) {
  CHECK(!ContainsKey(pending_callback_map_, handle));
  pending_callback_map_[handle] = CallbackResultPair(callback, rv);
  // Bound through a weak pointer: if the pool is destroyed first, the posted
  // task is a no-op.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback,
                 weak_factory_.GetWeakPtr(), handle));
}
// Runs the callback that InvokeUserCallbackLater() queued for |handle|,
// unless the request was cancelled in the meantime (entry already gone).
void ClientSocketPoolBaseHelper::InvokeUserCallback(
    ClientSocketHandle* handle) {
  PendingCallbackMap::iterator it = pending_callback_map_.find(handle);

  // Exit if the request has already been cancelled.
  if (it == pending_callback_map_.end())
    return;

  CHECK(!handle->is_initialized());
  // Copy the callback and result, then erase BEFORE running: the callback
  // may re-enter the pool with the same handle, and the CHECK in
  // InvokeUserCallbackLater() requires the entry to be gone by then.
  CompletionCallback callback = it->second.callback;
  int result = it->second.result;
  pending_callback_map_.erase(it);
  callback.Run(result);
}
1149 void ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools() {
1150 while (IsStalled()) {
1151 // Closing a socket will result in calling back into |this| to use the freed
1152 // socket slot, so nothing else is needed.
1153 if (!CloseOneIdleConnectionInHigherLayeredPool())
1154 return;
// All counters start at zero; |weak_factory_| backs the backup-job timer
// (see StartBackupSocketTimer()) so pending timers die with the Group.
ClientSocketPoolBaseHelper::Group::Group()
    : unassigned_job_count_(0),
      active_socket_count_(0),
      weak_factory_(this) {}
ClientSocketPoolBaseHelper::Group::~Group() {
  // Cancel any pending backup-job timer. By destruction time every
  // preconnect job must have been assigned or removed.
  CleanupBackupJob();
  DCHECK_EQ(0u, unassigned_job_count_);
}
// Arms a one-shot timer that will create a backup ConnectJob if the current
// connect attempt has not finished within the pool's retry interval.
void ClientSocketPoolBaseHelper::Group::StartBackupSocketTimer(
    const std::string& group_name,
    ClientSocketPoolBaseHelper* pool) {
  // Only allow one timer pending to create a backup socket.
  if (weak_factory_.HasWeakPtrs())
    return;

  // The task is bound through a weak pointer, so it is implicitly cancelled
  // if the Group is destroyed or its weak pointers are invalidated before
  // the delay elapses.
  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&Group::OnBackupSocketTimerFired, weak_factory_.GetWeakPtr(),
                 group_name, pool),
      pool->ConnectRetryInterval());
}
1182 bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() {
1183 SanityCheck();
1185 if (unassigned_job_count_ == 0)
1186 return false;
1187 --unassigned_job_count_;
1188 return true;
1191 void ClientSocketPoolBaseHelper::Group::AddJob(scoped_ptr<ConnectJob> job,
1192 bool is_preconnect) {
1193 SanityCheck();
1195 if (is_preconnect)
1196 ++unassigned_job_count_;
1197 jobs_.insert(job.release());
1200 void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) {
1201 scoped_ptr<ConnectJob> owned_job(job);
1202 SanityCheck();
1204 std::set<ConnectJob*>::iterator it = jobs_.find(job);
1205 if (it != jobs_.end()) {
1206 jobs_.erase(it);
1207 } else {
1208 NOTREACHED();
1210 size_t job_count = jobs_.size();
1211 if (job_count < unassigned_job_count_)
1212 unassigned_job_count_ = job_count;
// Timer callback from StartBackupSocketTimer(): if the primary connect
// attempt is still outstanding, launch a second ("backup") ConnectJob to
// race it. |group_name| is taken by value because this runs from a posted
// task, after the original caller's string may no longer exist.
void ClientSocketPoolBaseHelper::Group::OnBackupSocketTimerFired(
    std::string group_name,
    ClientSocketPoolBaseHelper* pool) {
  // If there are no more jobs pending, there is no work to do.
  // If we've done our cleanups correctly, this should not happen.
  if (jobs_.empty()) {
    NOTREACHED();
    return;
  }

  // If our old job is waiting on DNS, or if we can't create any sockets
  // right now due to limits, just reset the timer.
  if (pool->ReachedMaxSocketsLimit() ||
      !HasAvailableSocketSlot(pool->max_sockets_per_group_) ||
      (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
    StartBackupSocketTimer(group_name, pool);
    return;
  }

  // No request is waiting any more, so a backup socket would be pointless.
  if (pending_requests_.empty())
    return;

  scoped_ptr<ConnectJob> backup_job =
      pool->connect_job_factory_->NewConnectJob(
          group_name, **pending_requests_.begin(), pool);
  backup_job->net_log().AddEvent(NetLog::TYPE_SOCKET_BACKUP_CREATED);
  SIMPLE_STATS_COUNTER("socket.backup_created");
  int rv = backup_job->Connect();
  pool->connecting_socket_count_++;
  // AddJob() takes ownership; keep a raw pointer for the completion call.
  ConnectJob* raw_backup_job = backup_job.get();
  AddJob(backup_job.Pass(), false);
  if (rv != ERR_IO_PENDING)
    pool->OnConnectJobComplete(rv, raw_backup_job);
}
// Invariant: unassigned (preconnect) jobs are a subset of all jobs.
void ClientSocketPoolBaseHelper::Group::SanityCheck() {
  DCHECK_LE(unassigned_job_count_, jobs_.size());
}
// Deletes every ConnectJob in this group and cancels the pending backup-job
// timer, if any.
void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() {
  SanityCheck();

  // Delete active jobs.
  STLDeleteElements(&jobs_);
  unassigned_job_count_ = 0;

  // Cancel pending backup job.
  weak_factory_.InvalidateWeakPtrs();
}
1265 const ClientSocketPoolBaseHelper::Request*
1266 ClientSocketPoolBaseHelper::Group::GetNextPendingRequest() const {
1267 return pending_requests_.empty() ? NULL : *pending_requests_.begin();
1270 bool ClientSocketPoolBaseHelper::Group::HasConnectJobForHandle(
1271 const ClientSocketHandle* handle) const {
1272 // Search the first |jobs_.size()| pending requests for |handle|.
1273 // If it's farther back in the deque than that, it doesn't have a
1274 // corresponding ConnectJob.
1275 size_t i = 0;
1276 for (RequestQueue::const_iterator it = pending_requests_.begin();
1277 it != pending_requests_.end() && i < jobs_.size(); ++it, ++i) {
1278 if ((*it)->handle() == handle)
1279 return true;
1281 return false;
1284 void ClientSocketPoolBaseHelper::Group::InsertPendingRequest(
1285 scoped_ptr<const Request> r) {
1286 RequestQueue::iterator it = pending_requests_.begin();
1287 // TODO(mmenke): Should the network stack require requests with
1288 // |ignore_limits| have the highest priority?
1289 while (it != pending_requests_.end() &&
1290 CompareEffectiveRequestPriority(*r, *(*it)) <= 0) {
1291 ++it;
1293 pending_requests_.insert(it, r.release());
1296 scoped_ptr<const ClientSocketPoolBaseHelper::Request>
1297 ClientSocketPoolBaseHelper::Group::PopNextPendingRequest() {
1298 if (pending_requests_.empty())
1299 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
1300 return RemovePendingRequest(pending_requests_.begin());
1303 scoped_ptr<const ClientSocketPoolBaseHelper::Request>
1304 ClientSocketPoolBaseHelper::Group::FindAndRemovePendingRequest(
1305 ClientSocketHandle* handle) {
1306 for (RequestQueue::iterator it = pending_requests_.begin();
1307 it != pending_requests_.end(); ++it) {
1308 if ((*it)->handle() == handle) {
1309 scoped_ptr<const Request> request = RemovePendingRequest(it);
1310 return request.Pass();
1313 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>();
// Removes the request at |it| from the queue, transferring ownership to the
// caller. When the queue becomes empty the backup-job timer is cancelled,
// since a backup socket only makes sense while a request is waiting.
scoped_ptr<const ClientSocketPoolBaseHelper::Request>
ClientSocketPoolBaseHelper::Group::RemovePendingRequest(
    const RequestQueue::iterator& it) {
  scoped_ptr<const Request> request(*it);
  pending_requests_.erase(it);
  // If there are no more requests, kill the backup timer.
  if (pending_requests_.empty())
    CleanupBackupJob();
  return request.Pass();
}
1327 } // namespace internal
1329 } // namespace net