2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #include "hphp/runtime/vm/jit/tc-internal.h"
18 #include "hphp/runtime/vm/jit/tc.h"
20 #include "hphp/runtime/base/init-fini-node.h"
21 #include "hphp/runtime/base/perf-warning.h"
22 #include "hphp/runtime/base/runtime-option.h"
23 #include "hphp/runtime/base/stats.h"
24 #include "hphp/runtime/vm/debug/debug.h"
25 #include "hphp/runtime/vm/vm-regs.h"
26 #include "hphp/runtime/vm/workload-stats.h"
28 #include "hphp/runtime/vm/jit/code-cache.h"
29 #include "hphp/runtime/vm/jit/guard-type-profile.h"
30 #include "hphp/runtime/vm/jit/mcgen-translate.h"
31 #include "hphp/runtime/vm/jit/mcgen.h"
32 #include "hphp/runtime/vm/jit/perf-counters.h"
33 #include "hphp/runtime/vm/jit/prof-data.h"
34 #include "hphp/runtime/vm/jit/relocation.h"
35 #include "hphp/runtime/vm/jit/srcdb.h"
36 #include "hphp/runtime/vm/jit/stub-alloc.h"
37 #include "hphp/runtime/vm/jit/tc-prologue.h"
38 #include "hphp/runtime/vm/jit/tc-record.h"
39 #include "hphp/runtime/vm/jit/timer.h"
40 #include "hphp/runtime/vm/jit/trans-db.h"
41 #include "hphp/runtime/vm/jit/unique-stubs.h"
42 #include "hphp/runtime/vm/jit/unwind-itanium.h"
43 #include "hphp/runtime/vm/jit/vasm-emit.h"
44 #include "hphp/runtime/vm/jit/write-lease.h"
46 #include "hphp/util/disasm.h"
47 #include "hphp/util/mutex.h"
48 #include "hphp/util/rds-local.h"
49 #include "hphp/util/trace.h"
51 #include <tbb/concurrent_hash_map.h>
// NOTE(review): forward declaration of the default C++ unwind personality
// routine, passed below to initUnwinder() for the cti code region. The
// declaration is truncated in this extraction — confirm the remaining
// parameters against the original file.
55 extern "C" _Unwind_Reason_Code
56 __gxx_personality_v0(int, _Unwind_Action
, uint64_t, _Unwind_Exception
*,
61 namespace HPHP
{ namespace jit
{ namespace tc
{
// Process-wide translation-cache code blocks. Allocated via placement new in
// the process-init code below (never freed); null until that runs.
63 CodeCache
* g_code
{nullptr};
67 ///////////////////////////////////////////////////////////////////////////////
// Count of translations created so far; compared against
// Eval.JitGlobalTranslationLimit in canTranslate()/newTranslation().
71 std::atomic
<uint64_t> s_numTrans
;
// Global locks guarding the TC code area and its metadata, with lock-rank
// checking (code before metadata, per the rank constants).
72 SimpleMutex s_codeLock
{false, RankCodeCache
};
73 SimpleMutex s_metadataLock
{false, RankCodeMetadata
};
// Per-request snapshot of total TC usage, set at request start and read by
// codeEmittedThisRequest().
74 RDS_LOCAL_NO_CHECK(size_t, s_initialTCSize
);
76 bool shouldPGOFunc(const Func
* func
) {
77 return profData() != nullptr;
80 // A code reuse block owns temporary code blocks used to emit into a reused
81 // segment of another view.
82 struct CodeReuseBlock
{
// Temporary blocks that alias reclaimed holes inside the real main/cold/
// frozen areas when reusable-TC is on.
83 CodeBlock reusedMain
, reusedCold
, reusedFrozen
;
85 // Get a view into possibly reused code blocks (if there is space, and
86 // reusable TC is enabled).
87 CodeCache::View
getMaybeReusedView(CodeCache::View
& src
,
88 const TransRange
& range
) {
// Reuse is opt-in; otherwise hand back the source view unchanged.
89 if (!RuntimeOption::EvalEnableReusableTC
) return src
;
90 auto main
= &src
.main();
91 auto cold
= &src
.cold();
92 auto frozen
= &src
.frozen();
// Pad each requested size by Eval.ReusableTCPadding when probing for a
// reusable hole of sufficient size.
94 auto const pad
= RuntimeOption::EvalReusableTCPadding
;
95 size_t mainSize
= range
.main
.size() + pad
;
96 size_t coldSize
= range
.cold
.size() + pad
;
97 size_t frozenSize
= range
.frozen
.size() + pad
;
98 if (auto const s
= (TCA
)main
->allocInner(mainSize
)) {
99 reusedMain
.init(s
, mainSize
, "Reused main");
// NOTE(review): lines assigning main/cold to the reused blocks (and the
// matching closing braces) are missing from this extraction — confirm
// against the original file.
102 if (auto const s
= (TCA
)cold
->allocInner(coldSize
)) {
103 reusedCold
.init(s
, coldSize
, "Reused cold");
// Frozen is only separately reusable when it is a distinct block from cold.
106 if (cold
!= frozen
) {
107 if (auto const s
= (TCA
)frozen
->allocInner(frozenSize
)) {
108 reusedFrozen
.init(s
, frozenSize
, "Reused frozen");
109 frozen
= &reusedFrozen
;
// Local views (last argument false): this view targets the shared TC.
113 return CodeCache::View(*main
, *cold
, *frozen
, src
.data(), false);
118 ///////////////////////////////////////////////////////////////////////////////
// Convert this TransRange into a TransLoc describing the same translation.
// NOTE(review): the declaration of `loc' and the final return are missing
// from this extraction.
120 TransLoc
TransRange::loc() const {
122 loc
.setMainStart(main
.begin());
// Cold/frozen starts are backed up by sizeof(uint32_t) — presumably to
// account for a leading 32-bit size word before the code; the assertxes
// below check the sizes round-trip. TODO(review): confirm.
123 loc
.setColdStart(cold
.begin() - sizeof(uint32_t));
124 loc
.setFrozenStart(frozen
.begin() - sizeof(uint32_t));
125 loc
.setMainSize(main
.size());
127 assertx(loc
.coldCodeSize() == cold
.size());
128 assertx(loc
.frozenCodeSize() == frozen
.size());
132 bool canTranslate() {
133 return s_numTrans
.load(std::memory_order_relaxed
) <
134 RuntimeOption::EvalJitGlobalTranslationLimit
;
// Per-function and per-SrcKey hit counters used to delay Live/Profile
// translation until the Eval.Jit{Live,Profile}Threshold /
// Eval.JitSrcKeyThreshold counts are reached (see
// shouldTranslateNoSizeLimit below).
// NOTE(review): the hash-compare template argument of FuncCounterMap is
// missing from this extraction.
137 using FuncCounterMap
= tbb::concurrent_hash_map
<FuncId
, uint32_t,
139 static FuncCounterMap s_func_counters
;
141 using SrcKeyCounters
= tbb::concurrent_hash_map
<SrcKey
, uint32_t,
142 SrcKey::TbbHashCompare
>;
144 static SrcKeyCounters s_sk_counters
;
// Per-request flag: once the translation time budget is blown, all further
// translation requests in this request bail out quickly.
146 static RDS_LOCAL_NO_CHECK(bool, s_jittingTimeLimitExceeded
);
// Decide whether `sk' may be translated as `kind', ignoring TC size limits.
// Returns Success, or the scope at which translation is (currently) refused:
// Process (permanent), Request (for the rest of this request), or Transient.
148 TranslationResult::Scope
shouldTranslateNoSizeLimit(SrcKey sk
, TransKind kind
) {
149 // If we've hit Eval.JitGlobalTranslationLimit, then we stop translating.
150 if (!canTranslate()) return TranslationResult::Scope::Process
;
152 if (*s_jittingTimeLimitExceeded
) return TranslationResult::Scope::Request
;
// Enforce the per-request wall-time budget for translation (server mode
// only); once exceeded, latch s_jittingTimeLimitExceeded for this request.
154 auto const maxTransTime
= RuntimeOption::EvalJitMaxRequestTranslationTime
;
155 if (maxTransTime
>= 0 && RuntimeOption::ServerExecutionMode()) {
156 auto const transCounter
= Timer::CounterValue(Timer::mcg_translate
);
157 if (transCounter
.wall_time_elapsed
>= maxTransTime
) {
158 if (Trace::moduleEnabledRelease(Trace::mcg
, 1)) {
159 Trace::traceRelease("Skipping translation. "
160 "Time budget of %" PRId64
" exceeded. "
161 "%" PRId64
"us elapsed. "
162 "%" PRId64
" translations completed\n",
164 transCounter
.wall_time_elapsed
,
167 *s_jittingTimeLimitExceeded
= true;
168 return TranslationResult::Scope::Request
;
172 auto const func
= sk
.func();
174 // Do not translate functions from units marked as interpret-only.
175 if (func
->unit()->isInterpretOnly()) {
176 return TranslationResult::Scope::Transient
;
179 // Refuse to JIT Live translations if Eval.JitPGOOnly is enabled.
180 if (RuntimeOption::EvalJitPGOOnly
&&
181 (kind
== TransKind::Live
|| kind
== TransKind::LivePrologue
)) {
182 return TranslationResult::Scope::Transient
;
185 // Refuse to JIT Live / Profile translations for a function until
186 // Eval.JitLiveThreshold / Eval.JitProfileThreshold is hit.
187 auto const isLive
= kind
== TransKind::Live
||
188 kind
== TransKind::LivePrologue
;
189 auto const isProf
= kind
== TransKind::Profile
||
190 kind
== TransKind::ProfPrologue
;
191 if (isLive
|| isProf
) {
// Track per-SrcKey hits only when a SrcKey threshold is configured (> 1);
// tbb insert() returns false when the key already existed, in which case
// we bump the existing count under the accessor lock.
192 uint32_t skCount
= 1;
193 if (RuntimeOption::EvalJitSrcKeyThreshold
> 1) {
194 SrcKeyCounters::accessor acc
;
195 if (!s_sk_counters
.insert(acc
, SrcKeyCounters::value_type(sk
, 1))) {
196 skCount
= ++acc
->second
;
200 FuncCounterMap::accessor acc
;
201 if (!s_func_counters
.insert(acc
, {func
->getFuncId(), 1})) ++acc
->second
;
202 auto const funcThreshold
= isLive
? RuntimeOption::EvalJitLiveThreshold
203 : RuntimeOption::EvalJitProfileThreshold
;
204 if (acc
->second
< funcThreshold
) {
205 return TranslationResult::Scope::Transient
;
208 if (skCount
< RuntimeOption::EvalJitSrcKeyThreshold
) {
209 return TranslationResult::Scope::Transient
;
213 return TranslationResult::Scope::Success
;
// One-shot flag so the "cold/frozen full" perf warning is logged at most
// once per process.
216 static std::atomic_flag s_did_log
= ATOMIC_FLAG_INIT
;
// Latched once any TC size limit is hit; lets shouldTranslate() bail out
// without re-checking usage on every call.
217 static std::atomic
<bool> s_TCisFull
{false};
// Full translation gate: size limits first, then the no-size-limit policy
// checks. NOTE(review): the `switch (kind)' header and several closing
// braces are missing from this extraction.
219 TranslationResult::Scope
shouldTranslate(SrcKey sk
, TransKind kind
) {
// Fast path: once full, always refuse at Process scope.
220 if (s_TCisFull
.load(std::memory_order_relaxed
)) {
221 return TranslationResult::Scope::Process
;
224 if (*s_jittingTimeLimitExceeded
) {
225 return TranslationResult::Scope::Request
;
228 auto const main_under
= code().main().used() < CodeCache::AMaxUsage
;
229 auto const cold_under
= code().cold().used() < CodeCache::AColdMaxUsage
;
230 auto const froz_under
= code().frozen().used() < CodeCache::AFrozenMaxUsage
;
232 if (main_under
&& cold_under
&& froz_under
) {
233 return shouldTranslateNoSizeLimit(sk
, kind
);
236 // We use cold and frozen for all kinds of translations, but we
237 // allow PGO translations past the limit for main if there's still
238 // space in code.hot.
239 if (cold_under
&& froz_under
) {
241 case TransKind::ProfPrologue
:
242 case TransKind::Profile
:
243 case TransKind::OptPrologue
:
244 case TransKind::Optimize
:
245 if (code().hotEnabled()) return shouldTranslateNoSizeLimit(sk
, kind
);
252 // Set a flag so we quickly bail from trying to generate new
253 // translations next time.
254 s_TCisFull
.store(true, std::memory_order_relaxed
);
// The SrcKey counters are no longer needed once the TC is full; free them
// on the treadmill so concurrent readers finish first.
255 Treadmill::enqueue([] { s_sk_counters
.clear(); });
257 if (main_under
&& !s_did_log
.test_and_set() &&
258 RuntimeOption::EvalProfBranchSampleFreq
== 0) {
259 // If we ran out of TC space in cold or frozen but not in main,
260 // something unexpected is happening and we should take note of
261 // it. We skip this logging if TC branch profiling is on, since
262 // it fills up code and frozen at a much higher rate.
264 logPerfWarning("cold_full", 1, [] (StructuredLogEntry
&) {});
267 logPerfWarning("frozen_full", 1, [] (StructuredLogEntry
&) {});
271 return TranslationResult::Scope::Process
;
// Claim a translation slot; bumps the global counter and compares it against
// Eval.JitGlobalTranslationLimit. NOTE(review): the body of the limit-hit
// branch and the success return are missing from this extraction —
// presumably it returns false here and true otherwise; confirm.
274 bool newTranslation() {
275 if (s_numTrans
.fetch_add(1, std::memory_order_relaxed
) >=
276 RuntimeOption::EvalJitGlobalTranslationLimit
) {
282 std::unique_lock
<SimpleMutex
> lockCode(bool lock
) {
283 if (lock
) return std::unique_lock
<SimpleMutex
>{ s_codeLock
};
284 return std::unique_lock
<SimpleMutex
>{s_codeLock
, std::defer_lock
};
287 std::unique_lock
<SimpleMutex
> lockMetadata(bool lock
) {
288 if (lock
) return std::unique_lock
<SimpleMutex
>{s_metadataLock
};
289 return std::unique_lock
<SimpleMutex
>{s_metadataLock
, std::defer_lock
};
// RAII wrapper that manages both the code and metadata locks together.
// NOTE(review): the m_code initializer (original line 293) and the bodies of
// lock()/unlock() are missing from this extraction — presumably they
// acquire/release both members in code-then-metadata rank order; confirm.
292 CodeMetaLock::CodeMetaLock(bool f
) :
294 m_meta(lockMetadata(f
)) {
297 void CodeMetaLock::lock() {
302 void CodeMetaLock::unlock() {
307 void assertOwnsCodeLock(OptView v
) {
308 if (!v
|| !v
->isLocal()) s_codeLock
.assertOwnedBySelf();
310 void assertOwnsMetadataLock() { s_metadataLock
.assertOwnedBySelf(); }
// Per-request initialization of JIT state (the enclosing function header —
// presumably requestInit() — is not visible in this extraction).
313 tl_regState
= VMRegState::CLEAN
;
314 Timer::RequestInit();
// Zero this request's perf-counter block.
315 memset(rl_perf_counters
.getCheck(), 0, sizeof(PerfCounters
));
317 requestInitProfData();
// Snapshot TC usage at request entry, read later by
// codeEmittedThisRequest().
318 *s_initialTCSize
.getCheck() = g_code
->totalUsed();
319 assertx(!g_unwind_rds
.isInit());
320 memset(g_unwind_rds
.get(), 0, sizeof(UnwindRDS
));
321 g_unwind_rds
.markInit();
// Reset the per-request translation-time-budget latch.
322 *s_jittingTimeLimitExceeded
.getCheck() = false;
// Per-request teardown of JIT state (the enclosing function header is not
// visible in this extraction).
328 if (RuntimeOption::EvalJitProfileGuardTypes
) {
329 logGuardProfileData();
331 Timer::RequestExit();
// ProfData counters may be reset between requests while profiling.
332 if (profData()) profData()->maybeResetCounters();
333 requestExitProfData();
// Release-trace dump of this request's perf counters, gated on the mcgstats
// trace module (enclosing function header not visible in this extraction).
337 if (Trace::moduleEnabledRelease(Trace::mcgstats
, 1)) {
338 Trace::traceRelease("MCGenerator perf counters for %s:\n",
339 g_context
->getRequestUrl(50).c_str());
340 for (int i
= 0; i
< tpc_num_counters
; i
++) {
341 Trace::traceRelease("%-20s %10" PRId64
"\n",
342 kPerfCounterNames
[i
], rl_perf_counters
[i
]);
344 Trace::traceRelease("\n");
348 void codeEmittedThisRequest(size_t& requestEntry
, size_t& now
) {
349 requestEntry
= *s_initialTCSize
;
350 now
= g_code
->totalUsed();
// One-time process initialization of the TC (the enclosing function header —
// presumably processInit() — is not visible in this extraction).
354 auto codeLock
= lockCode();
355 auto metaLock
= lockMetadata();
// The CodeCache is placement-new'd into low memory and intentionally never
// destroyed.
357 g_code
= new(low_malloc(sizeof(CodeCache
))) CodeCache();
358 g_ustubs
.emitAll(*g_code
, *Debug::DebugInfo::Get());
360 // Write an .eh_frame section that covers the JIT portion of the TC.
361 initUnwinder(g_code
->base(), g_code
->tcSize(),
362 tc_unwind_personality
);
364 if (auto cti_cap
= g_code
->bytecode().capacity()) {
365 // write an .eh_frame for cti code using default personality
366 initUnwinder(g_code
->bytecode().base(), cti_cap
, __gxx_personality_v0
);
// Keep the disassembler from symbolizing addresses inside the TC.
369 Disasm::ExcludedAddressRange(g_code
->base(), g_code
->codeSize());
378 bool isValidCodeAddress(TCA addr
) {
379 return g_code
->isValidCodeAddress(addr
);
382 bool isProfileCodeAddress(TCA addr
) {
383 return g_code
->prof().contains(addr
);
386 bool isHotCodeAddress(TCA addr
) {
387 return g_code
->hot().contains(addr
);
// Return a frozen-area service stub to the free list.
// NOTE(review): the tail of this function (the actual free/markStubFreed
// call) is missing from this extraction.
390 void freeTCStub(TCA stub
) {
391 // We need to lock the code because s_freeStubs.push() writes to the stub and
392 // the metadata to protect s_freeStubs itself.
393 auto codeLock
= lockCode();
394 auto metaLock
= lockMetadata();
// Stubs always live in the frozen area.
396 assertx(code().frozen().contains(stub
));
401 void checkFreeProfData() {
402 // In PGO mode, we free all the profiling data once the main code area reaches
403 // its maximum usage and either the hot area is also full or all the functions
404 // that were profiled have already been optimized.
406 // However, we keep the data around indefinitely in a few special modes:
407 // * Eval.EnableReusableTC
408 // * TC dumping enabled (Eval.DumpTC/DumpIR/etc.)
410 // Finally, when the RetranslateAll mode is enabled, the ProfData is discarded
411 // via a different mechanism, after all the optimized translations are
// NOTE(review): the head of the condition (presumably `if (profData() &&')
// and the then-branch (presumably discardProfData()) are missing from this
// extraction — confirm against the original file.
414 !RuntimeOption::EvalEnableReusableTC
&&
415 code().main().used() >= CodeCache::AMaxUsage
&&
416 (!code().hotEnabled() ||
417 profData()->profilingFuncs() == profData()->optimizedFuncs()) &&
418 !transdb::enabled() &&
419 !mcgen::retranslateAllEnabled()) {
// Remove, from every SrcRec in the SrcDB, incoming-branch records that point
// into the profiling code area [base, frontier) — done before that area is
// freed. NOTE(review): the line binding `sr' from the iterator is missing
// from this extraction.
424 static void dropSrcDBProfIncomingBranches() {
425 auto const base
= code().prof().base();
426 auto const frontier
= code().prof().frontier();
427 for (auto& it
: srcDB()) {
429 sr
->removeIncomingBranchesInRange(base
, frontier
);
// Free the profiling code area. Work is deferred to the treadmill so no
// request can still be executing prof code when it is dropped.
// NOTE(review): the tail of the enqueued lambda (the actual free of the prof
// area) is missing from this extraction.
433 void freeProfCode() {
434 Treadmill::enqueue([]{
435 dropSrcDBProfIncomingBranches();
437 // Clearing the inline stacks map is purely an optimization, and it barely
438 // buys us anything when we're using jumpstart (because we have very few
439 // profiling translations, if any), so we skip it in this case.
440 if (!isJitDeserializing()) {
441 auto metaLock
= lockMetadata();
442 auto const base
= code().prof().base();
443 auto const frontier
= code().prof().frontier();
444 eraseInlineStacksInRange(base
, frontier
);
449 bool shouldProfileNewFuncs() {
450 if (profData() == nullptr) return false;
452 // We have two knobs to control the number of functions we're allowed to
453 // profile: Eval.JitProfileRequests and Eval.JitProfileBCSize. We profile new
454 // functions until either of these limits is exceeded. In practice, we expect
455 // to hit the bytecode size limit first, but we keep the request limit around
457 return profData()->profilingBCSize() < RuntimeOption::EvalJitProfileBCSize
&&
458 requestCount() < RuntimeOption::EvalJitProfileRequests
;
461 bool profileFunc(const Func
* func
) {
462 // If retranslateAll has been scheduled (including cases when it is going on,
463 // or has finished), we can't emit more Profile translations. This is to
464 // ensure that, when retranslateAll() runs, no more Profile translations are
465 // being added to ProfData.
466 if (mcgen::retranslateAllScheduled()) return false;
468 if (code().prof().used() >= CodeCache::AProfMaxUsage
) return false;
470 if (!shouldPGOFunc(func
)) return false;
472 if (profData()->optimized(func
->getFuncId())) return false;
474 // If we already started profiling `func', then we return true and skip the
475 // other checks below.
476 if (profData()->profiling(func
->getFuncId())) return true;
478 return shouldProfileNewFuncs();
481 ///////////////////////////////////////////////////////////////////////////////
// Carve a thread-local TC buffer out of `start' (of size `initialSize'),
// split into four equal quarters: main, cold, frozen, and data. `fakeStart'
// maps each block to the canonical thread-local address range.
// NOTE(review): the tail of the initBlock lambda (original lines 489-491,
// which presumably advance fakeStart/start between blocks) is missing from
// this extraction — confirm against the original file.
483 LocalTCBuffer::LocalTCBuffer(Address start
, size_t initialSize
) {
484 TCA fakeStart
= code().threadLocalStart();
485 auto const sz
= initialSize
/ 4;
486 auto initBlock
= [&] (DataBlock
& block
, size_t mxSz
, const char* nm
) {
487 always_assert(sz
<= mxSz
);
488 block
.init(fakeStart
, start
, sz
, mxSz
, nm
);
// Max sizes come from the per-area Eval.ThreadTC*BufferSize options.
492 initBlock(m_main
, RuntimeOption::EvalThreadTCMainBufferSize
,
493 "thread local main");
494 initBlock(m_cold
, RuntimeOption::EvalThreadTCColdBufferSize
,
495 "thread local cold");
496 initBlock(m_frozen
, RuntimeOption::EvalThreadTCFrozenBufferSize
,
497 "thread local frozen");
498 initBlock(m_data
, RuntimeOption::EvalThreadTCDataBufferSize
,
499 "thread local data");
502 OptView
LocalTCBuffer::view() {
503 if (!valid()) return folly::none
;
504 return CodeCache::View(m_main
, m_cold
, m_frozen
, m_data
, true);
507 ////////////////////////////////////////////////////////////////////////////////
508 // Translator internals
509 Translator::Translator(SrcKey sk
, TransKind kind
)
510 : sk(sk
), kind(kind
), unit(), vunit()
513 Translator::~Translator() = default;
// Acquire the write lease and perform all pre-translation checks. Returns a
// TranslationResult when translation must not proceed (cached translation,
// lease failure, policy refusal, jitting disabled), or folly::none when the
// caller may go ahead and translate. NOTE(review): several lines (the
// lambda's tail returns, the final getCached check, closing braces) are
// missing from this extraction.
515 folly::Optional
<TranslationResult
>
516 Translator::acquireLeaseAndRequisitePaperwork() {
519 // Avoid a race where we would create a Live translation while
520 // retranslateAll is in flight and we haven't generated an
521 // Optimized translation yet.
522 auto const shouldEmitLiveTranslation
= [&] {
523 if (mcgen::retranslateAllPending() && !isProfiling(kind
) && profData()) {
524 // Functions that are marked as being profiled or marked as having been
525 // optimized are about to have their translations invalidated during the
526 // publish phase of retranslate all. Don't allow live translations to be
527 // emitted in this scenario.
528 auto const fid
= sk
.func()->getFuncId();
529 return !profData()->profiling(fid
) &&
530 !profData()->optimized(fid
);
534 if (!shouldEmitLiveTranslation()) {
535 return TranslationResult::failTransiently();
538 if (auto const p
= getCached()) return *p
;
540 // Acquire the appropriate lease otherwise bail to a fallback
541 // execution mode (eg. interpreter) by returning a nullptr
542 // translation address.
543 m_lease
.emplace(sk
.func(), kind
);
544 if (!(*m_lease
)) return TranslationResult::failTransiently();
545 computeKind(); // Recompute the kind in case we are no longer profiling.
546 if (!m_lease
->checkKind(kind
)) return TranslationResult::failTransiently();
548 // Check again if we can emit live translations for the given
549 // func now that we have the lock.
550 if (!shouldEmitLiveTranslation()) {
551 return TranslationResult::failTransiently();
// Apply the global translation policy (size limits etc.); a Process-scope
// refusal is cached so future requests fail fast.
554 if (auto const s
= shouldTranslate();
555 s
!= TranslationResult::Scope::Success
) {
556 if (s
== TranslationResult::Scope::Process
) setCachedForProcessFail();
557 return TranslationResult
{s
};
560 if (UNLIKELY(RID().isJittingDisabled())) {
561 TRACE(2, "punting because jitting code was disabled\n");
562 return TranslationResult::failTransiently();
565 // Check for cached one last time since we have all the locks now.
569 TranslationResult::Scope
Translator::shouldTranslate(bool noSizeLimit
) {
570 if (kind
== TransKind::Invalid
) computeKind();
572 return shouldTranslateNoSizeLimit(sk
, kind
);
574 return ::HPHP::jit::tc::shouldTranslate(sk
, kind
);
// Generate machine code for this translation into `view' (or into a fresh
// thread-local / global view when none is supplied), populating transMeta on
// success. NOTE(review): many lines (vunit checks, the retry loop structure,
// several closing braces) are missing from this extraction; comments below
// only describe what the visible code shows.
577 void Translator::translate(folly::Optional
<CodeCache::View
> view
) {
// Profile translations get their TransID allocated up front.
578 if (isProfiling(kind
)) {
579 transId
= profData()->allocTransID();
// Respect the global translation-count limit.
582 if (!newTranslation()) return;
584 WorkloadStats::EnsureInit();
585 WorkloadStats
guard(WorkloadStats::InTrans
);
592 // Check for translation failure.
595 Timer
timer(Timer::mcg_finishTranslation
);
600 return traceProps(*vunit
);
// Lock is created deferred; it is only taken when emitting into the shared
// TC (see the comment below).
604 auto codeLock
= lockCode(false);
605 if (!view
.hasValue()) {
// With reusable TC, emit into a small heap-backed thread-local buffer and
// relocate later.
606 if (RuntimeOption::EvalEnableReusableTC
) {
607 auto const initialSize
= 256;
608 m_localBuffer
= std::make_unique
<uint8_t[]>(initialSize
);
610 std::make_unique
<LocalTCBuffer
>(m_localBuffer
.get(), initialSize
);
611 view
= m_localTCBuffer
->view();
613 // Using the global TC view. Better lock things.
618 // Tag the translation start, and build the trans meta.
619 // Generate vasm into the code view, retrying if we fill hot.
621 if (!view
.hasValue() || !view
->isLocal()) {
622 view
.emplace(code().view(kind
));
625 TransLocMaker maker
{*view
};
628 emitVunit(*vunit
, unit
.get(), *view
, fixups
,
629 mcgen::dumpTCAnnotation(kind
) ? getAnnotations()
// A full hot block is retried (against a non-hot view); any other full
// block means the translation is dropped and logged below.
631 } catch (const DataBlockFull
& dbFull
) {
632 always_assert(!view
->isLocal());
633 if (dbFull
.name
== "hot") {
635 // Rollback tags and try again.
640 auto const range
= maker
.markEnd();
641 auto const bytes
= range
.main
.size() + range
.cold
.size() +
643 // There should be few of these. They mean there is wasted work
644 // performing translation for functions that don't have space in the TC.
645 logPerfWarning("translation_overflow", 1, [&] (StructuredLogEntry
& e
) {
646 e
.setStr("kind", show(kind
));
647 e
.setStr("srckey", show(sk
));
648 e
.setStr("data_block", dbFull
.name
);
649 e
.setInt("bytes_dropped", bytes
);
// Success path: record the view, fixups and emitted range in transMeta.
654 transMeta
.emplace(*view
);
655 transMeta
->fixups
= std::move(fixups
);
656 transMeta
->range
= maker
.markEnd();
660 if (isProfiling(kind
)) {
661 profData()->setProfiling(sk
.func());
664 Timer
metaTimer(Timer::mcg_finishTranslation_metadata
);
665 if (unit
&& unit
->logEntry()) {
666 auto metaLock
= lockMetadata();
667 logTranslation(this, transMeta
->range
);
670 if (!RuntimeOption::EvalJitLogAllInlineRegions
.empty()) {
675 bool Translator::translateSuccess() const {
676 return transMeta
.has_value();
// Relocate a translation emitted into a thread-local buffer into its final
// location in the shared TC. NOTE(review): several lines (the RelocationInfo
// declaration, the hot-retry structure, closing braces) are missing from
// this extraction; comments only describe the visible code.
679 void Translator::relocate() {
680 assertx(transMeta
.hasValue());
681 // Code emitted directly is relocated during emission (or emitted
682 // directly in place).
683 if (!transMeta
->view
.isLocal()) {
684 assertx(!RuntimeOption::EvalEnableReusableTC
);
688 WorkloadStats::EnsureInit();
689 WorkloadStats
guard(WorkloadStats::InTrans
);
691 auto const& range
= transMeta
->range
;
692 auto& fixups
= transMeta
->fixups
;
696 auto codeLock
= lockCode();
// Destination: the shared TC, possibly a reused hole inside it.
698 auto finalView
= code().view(kind
);
700 auto dstView
= crb
.getMaybeReusedView(finalView
, range
);
701 auto& srcView
= transMeta
->view
;
702 TransLocMaker maker
{dstView
};
// Copy the data area byte-for-byte and record each address mapping so
// references into it can be rewritten.
706 auto origin
= range
.data
;
707 if (!origin
.empty()) {
708 dstView
.data().bytes(origin
.size(),
709 srcView
.data().toDestAddress(origin
.begin()));
711 auto dest
= maker
.dataRange();
712 auto oAddr
= origin
.begin();
713 auto dAddr
= dest
.begin();
714 while (oAddr
!= origin
.end()) {
715 assertx(dAddr
!= dest
.end());
716 rel
.recordAddress(oAddr
++, dAddr
++, 0);
// Relocate each code area; frozen only if it is a distinct block from cold.
720 jit::relocate(rel
, dstView
.main(), range
.main
.begin(), range
.main
.end(),
721 srcView
.main(), fixups
, nullptr, AreaIndex::Main
);
722 jit::relocate(rel
, dstView
.cold(), range
.cold
.begin(), range
.cold
.end(),
723 srcView
.cold(), fixups
, nullptr, AreaIndex::Cold
);
724 if (&srcView
.cold() != &srcView
.frozen()) {
725 jit::relocate(rel
, dstView
.frozen(), range
.frozen
.begin(),
726 range
.frozen
.end(), srcView
.frozen(), fixups
, nullptr,
730 } catch (const DataBlockFull
& dbFull
) {
731 if (dbFull
.name
== "hot") {
736 auto const bytes
= range
.main
.size() + range
.cold
.size() +
738 // There should be few of these. They mean there is wasted work
739 // performing translation for functions that don't have space in the TC.
740 logPerfWarning("translation_overflow", 1, [&] (StructuredLogEntry
& e
) {
741 e
.setStr("kind", show(kind
));
742 e
.setStr("srckey", show(sk
));
743 e
.setStr("data_block", dbFull
.name
);
744 e
.setInt("bytes_dropped", bytes
);
// On success, transMeta now describes the final (shared-TC) location.
749 transMeta
->range
= maker
.markEnd();
750 transMeta
->view
= finalView
;
// Rewrite all recorded addresses, metadata and code for the new location.
754 adjustForRelocation(rel
);
755 adjustMetaDataForRelocation(rel
, nullptr, fixups
);
756 adjustCodeForRelocation(rel
, fixups
);
759 TCA
Translator::publish() {
760 assertx(transMeta
.hasValue());
761 auto codeLock
= lockCode();
762 auto metaLock
= lockMetadata();
763 publishMetaInternal();
764 publishCodeInternal();
765 return transMeta
->range
.loc().entry();
768 void Translator::publishMetaInternal() {
769 assertx(transMeta
.hasValue());
770 this->publishMetaImpl();
773 void Translator::publishCodeInternal() {
774 assertx(transMeta
.hasValue());
775 this->publishCodeImpl();
776 updateCodeSizeCounters();
779 ////////////////////////////////////////////////////////////////////////////////