courgette/encoded_program.cc
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "courgette/encoded_program.h"

#include <algorithm>
#include <map>
#include <string>
#include <vector>

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "courgette/courgette.h"
#include "courgette/disassembler_elf_32_arm.h"
#include "courgette/streams.h"
#include "courgette/types_elf.h"
namespace courgette {

// Stream indexes.
const int kStreamMisc = 0;
const int kStreamOps = 1;
const int kStreamBytes = 2;
const int kStreamAbs32Indexes = 3;
const int kStreamRel32Indexes = 4;
const int kStreamAbs32Addresses = 5;
const int kStreamRel32Addresses = 6;
const int kStreamCopyCounts = 7;
const int kStreamOriginAddresses = kStreamMisc;

const int kStreamLimit = 9;
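// (Note: each kStream index selects its own stream in the stream set, so
// values with similar statistics are kept together; this per-stream
// separation is what the field-select experiments further below measure.)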
// Constructor is here rather than in the header. Although the constructor
// appears to do nothing, it is in fact quite large because of the implicit
// calls to field constructors. Ditto for the destructor.
EncodedProgram::EncodedProgram() : image_base_(0) {}
EncodedProgram::~EncodedProgram() {}
// Serializes a vector of integral values using Varint32 coding.
template<typename V>
CheckBool WriteVector(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  for (size_t i = 0; ok && i < count; ++i) {
    ok = buffer->WriteSizeVarint32(items[i]);
  }
  return ok;
}
template<typename V>
bool ReadVector(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();

  bool ok = items->reserve(count);
  for (size_t i = 0; ok && i < count; ++i) {
    uint32 item;
    ok = buffer->ReadVarint32(&item);
    if (ok)
      ok = items->push_back(static_cast<typename V::value_type>(item));
  }

  return ok;
}
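// (Illustrative: assuming the usual base-128 little-endian varint scheme
// behind WriteVarint32/ReadVarint32 in streams.h, a count of 300 is written
// as the two bytes 0xAC 0x02, and each element follows in the same
// variable-length form.)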
// Serializes a vector, using delta coding followed by Varint32 coding.
template<typename V>
CheckBool WriteU32Delta(const V& set, SinkStream* buffer) {
  size_t count = set.size();
  bool ok = buffer->WriteSizeVarint32(count);
  uint32 prev = 0;
  for (size_t i = 0; ok && i < count; ++i) {
    uint32 current = set[i];
    uint32 delta = current - prev;
    ok = buffer->WriteVarint32(delta);
    prev = current;
  }
  return ok;
}
template <typename V>
static CheckBool ReadU32Delta(V* set, SourceStream* buffer) {
  uint32 count;

  if (!buffer->ReadVarint32(&count))
    return false;

  set->clear();
  bool ok = set->reserve(count);
  uint32 prev = 0;

  for (size_t i = 0; ok && i < count; ++i) {
    uint32 delta;
    ok = buffer->ReadVarint32(&delta);
    if (ok) {
      uint32 current = prev + delta;
      ok = set->push_back(current);
      prev = current;
    }
  }

  return ok;
}
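// (Illustrative example of the delta coding above: the sorted RVAs
// {0x1000, 0x1004, 0x1010} are written as the deltas {0x1000, 4, 0xC}, each
// Varint32-encoded, and ReadU32Delta reverses this by accumulating deltas.)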
// Write a vector as the byte representation of the contents.
//
// (This only really makes sense for a type T with sizeof(T) == 1, otherwise
// the serialized representation is not endian-agnostic. But it is useful to
// keep the possibility of a greater size for experiments comparing Varint32
// encoding of a vector of larger integrals vs a plain form.)
//
template<typename V>
CheckBool WriteVectorU8(const V& items, SinkStream* buffer) {
  size_t count = items.size();
  bool ok = buffer->WriteSizeVarint32(count);
  if (count != 0 && ok) {
    size_t byte_count = count * sizeof(typename V::value_type);
    ok = buffer->Write(static_cast<const void*>(&items[0]), byte_count);
  }
  return ok;
}
template<typename V>
bool ReadVectorU8(V* items, SourceStream* buffer) {
  uint32 count;
  if (!buffer->ReadVarint32(&count))
    return false;

  items->clear();
  bool ok = items->resize(count, 0);
  if (ok && count != 0) {
    size_t byte_count = count * sizeof(typename V::value_type);
    return buffer->Read(static_cast<void*>(&((*items)[0])), byte_count);
  }
  return ok;
}
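// (Illustrative: WriteVectorU8 on the bytes {0x10, 0x20, 0x30} emits the
// Varint32 count 0x03 followed by the three raw bytes, so the pair of
// functions round-trips the copy_bytes_ stream with no per-byte overhead.)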
////////////////////////////////////////////////////////////////////////////////

CheckBool EncodedProgram::DefineRel32Label(int index, RVA value) {
  return DefineLabelCommon(&rel32_rva_, index, value);
}

CheckBool EncodedProgram::DefineAbs32Label(int index, RVA value) {
  return DefineLabelCommon(&abs32_rva_, index, value);
}

static const RVA kUnassignedRVA = static_cast<RVA>(-1);
CheckBool EncodedProgram::DefineLabelCommon(RvaVector* rvas,
                                            int index,
                                            RVA rva) {
  bool ok = true;
  if (static_cast<int>(rvas->size()) <= index)
    ok = rvas->resize(index + 1, kUnassignedRVA);

  if (ok) {
    DCHECK_EQ((*rvas)[index], kUnassignedRVA)
        << "DefineLabel double assigned " << index;
    (*rvas)[index] = rva;
  }

  return ok;
}
void EncodedProgram::EndLabels() {
  FinishLabelsCommon(&abs32_rva_);
  FinishLabelsCommon(&rel32_rva_);
}

void EncodedProgram::FinishLabelsCommon(RvaVector* rvas) {
  // Replace all unassigned slots with the value at the previous index so they
  // delta-encode to zero. (There might be better values than zero. The way to
  // get that is to have the higher-level assembly program assign the
  // unassigned slots.)
  RVA previous = 0;
  size_t size = rvas->size();
  for (size_t i = 0; i < size; ++i) {
    if ((*rvas)[i] == kUnassignedRVA)
      (*rvas)[i] = previous;
    else
      previous = (*rvas)[i];
  }
}
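// (Illustrative: a label table {kUnassignedRVA, 0x1000, kUnassignedRVA, 0x2000}
// becomes {0, 0x1000, 0x1000, 0x2000}, so WriteU32Delta later emits a zero
// delta for each filled-in slot.)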
CheckBool EncodedProgram::AddOrigin(RVA origin) {
  return ops_.push_back(ORIGIN) && origins_.push_back(origin);
}
CheckBool EncodedProgram::AddCopy(size_t count, const void* bytes) {
  const uint8* source = static_cast<const uint8*>(bytes);

  bool ok = true;

  // Fold adjacent COPY instructions into one. This nearly halves the size of
  // an EncodedProgram with only COPY1 instructions, since there are roughly 16
  // plain bytes per reloc. This has a working-set benefit during
  // decompression. For compression of files with large differences this makes
  // a small (4%) improvement in size. For files with small differences this
  // degrades the compressed size by 1.3%.
  if (!ops_.empty()) {
    if (ops_.back() == COPY1) {
      ops_.back() = COPY;
      ok = copy_counts_.push_back(1);
    }
    if (ok && ops_.back() == COPY) {
      copy_counts_.back() += count;
      for (size_t i = 0; ok && i < count; ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
      return ok;
    }
  }

  if (ok) {
    if (count == 1) {
      ok = ops_.push_back(COPY1) && copy_bytes_.push_back(source[0]);
    } else {
      ok = ops_.push_back(COPY) && copy_counts_.push_back(count);
      for (size_t i = 0; ok && i < count; ++i) {
        ok = copy_bytes_.push_back(source[i]);
      }
    }
  }

  return ok;
}
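// (Illustrative: AddCopy(1, "A") followed by AddCopy(2, "BC") leaves a single
// COPY op with copy_counts_ entry 3 and copy_bytes_ {'A', 'B', 'C'}, rather
// than separate COPY1 and COPY ops.)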
CheckBool EncodedProgram::AddAbs32(int label_index) {
  return ops_.push_back(ABS32) && abs32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32(int label_index) {
  return ops_.push_back(REL32) && rel32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddRel32ARM(uint16 op, int label_index) {
  return ops_.push_back(static_cast<OP>(op)) &&
         rel32_ix_.push_back(label_index);
}

CheckBool EncodedProgram::AddPeMakeRelocs(ExecutableType kind) {
  if (kind == EXE_WIN_32_X86)
    return ops_.push_back(MAKE_PE_RELOCATION_TABLE);
  return ops_.push_back(MAKE_PE64_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfMakeRelocs() {
  return ops_.push_back(MAKE_ELF_RELOCATION_TABLE);
}

CheckBool EncodedProgram::AddElfARMMakeRelocs() {
  return ops_.push_back(MAKE_ELF_ARM_RELOCATION_TABLE);
}
void EncodedProgram::DebuggingSummary() {
  VLOG(1) << "EncodedProgram Summary"
          << "\n image base " << image_base_
          << "\n abs32 rvas " << abs32_rva_.size()
          << "\n rel32 rvas " << rel32_rva_.size()
          << "\n ops " << ops_.size()
          << "\n origins " << origins_.size()
          << "\n copy_counts " << copy_counts_.size()
          << "\n copy_bytes " << copy_bytes_.size()
          << "\n abs32_ix " << abs32_ix_.size()
          << "\n rel32_ix " << rel32_ix_.size();
}
////////////////////////////////////////////////////////////////////////////////

// For algorithm refinement purposes it is useful to write subsets of the file
// format. This gives us the ability to estimate the entropy of the
// differential compression of the individual streams, which can provide
// invaluable insights. The default, of course, is to include all the streams.
//
enum FieldSelect {
  INCLUDE_ABS32_ADDRESSES = 0x0001,
  INCLUDE_REL32_ADDRESSES = 0x0002,
  INCLUDE_ABS32_INDEXES = 0x0010,
  INCLUDE_REL32_INDEXES = 0x0020,
  INCLUDE_OPS = 0x0100,
  INCLUDE_BYTES = 0x0200,
  INCLUDE_COPY_COUNTS = 0x0400,
  INCLUDE_MISC = 0x1000
};
static FieldSelect GetFieldSelect() {
#if 1
  // TODO(sra): Use better configuration.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  std::string s;
  env->GetVar("A_FIELDS", &s);
  if (!s.empty()) {
    return static_cast<FieldSelect>(
        wcstoul(base::ASCIIToWide(s).c_str(), 0, 0));
  }
#endif
  return static_cast<FieldSelect>(~0);
}
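// (Illustrative: setting the environment variable A_FIELDS to 0x1303 would
// select INCLUDE_MISC, INCLUDE_OPS, INCLUDE_BYTES and both address streams,
// letting an experiment measure how those streams compress on their own.)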
CheckBool EncodedProgram::WriteTo(SinkStreamSet* streams) {
  FieldSelect select = GetFieldSelect();

  // The order of fields must be consistent in WriteTo and ReadFrom, regardless
  // of the streams used. The code can be configured with all kStreamXXX
  // constants the same.
  //
  // If we change the code to pipeline reading with assembly (to avoid
  // temporary storage vectors by consuming operands directly from the stream)
  // then we need to read the base address and the random access address tables
  // first; the rest can be interleaved.

  if (select & INCLUDE_MISC) {
    // TODO(sra): write 64 bits.
    if (!streams->stream(kStreamMisc)->WriteVarint32(
            static_cast<uint32>(image_base_))) {
      return false;
    }
  }

  bool success = true;

  if (select & INCLUDE_ABS32_ADDRESSES) {
    success &= WriteU32Delta(abs32_rva_,
                             streams->stream(kStreamAbs32Addresses));
  }

  if (select & INCLUDE_REL32_ADDRESSES) {
    success &= WriteU32Delta(rel32_rva_,
                             streams->stream(kStreamRel32Addresses));
  }

  if (select & INCLUDE_MISC)
    success &= WriteVector(origins_, streams->stream(kStreamOriginAddresses));

  if (select & INCLUDE_OPS) {
    // Plus 5 bytes for the Varint32 length prefix.
    success &= streams->stream(kStreamOps)->Reserve(ops_.size() + 5);
    success &= WriteVector(ops_, streams->stream(kStreamOps));
  }

  if (select & INCLUDE_COPY_COUNTS)
    success &= WriteVector(copy_counts_, streams->stream(kStreamCopyCounts));

  if (select & INCLUDE_BYTES)
    success &= WriteVectorU8(copy_bytes_, streams->stream(kStreamBytes));

  if (select & INCLUDE_ABS32_INDEXES)
    success &= WriteVector(abs32_ix_, streams->stream(kStreamAbs32Indexes));

  if (select & INCLUDE_REL32_INDEXES)
    success &= WriteVector(rel32_ix_, streams->stream(kStreamRel32Indexes));

  return success;
}
bool EncodedProgram::ReadFrom(SourceStreamSet* streams) {
  // TODO(sra): read 64 bits.
  uint32 temp;
  if (!streams->stream(kStreamMisc)->ReadVarint32(&temp))
    return false;
  image_base_ = temp;

  if (!ReadU32Delta(&abs32_rva_, streams->stream(kStreamAbs32Addresses)))
    return false;
  if (!ReadU32Delta(&rel32_rva_, streams->stream(kStreamRel32Addresses)))
    return false;
  if (!ReadVector(&origins_, streams->stream(kStreamOriginAddresses)))
    return false;
  if (!ReadVector(&ops_, streams->stream(kStreamOps)))
    return false;
  if (!ReadVector(&copy_counts_, streams->stream(kStreamCopyCounts)))
    return false;
  if (!ReadVectorU8(&copy_bytes_, streams->stream(kStreamBytes)))
    return false;
  if (!ReadVector(&abs32_ix_, streams->stream(kStreamAbs32Indexes)))
    return false;
  if (!ReadVector(&rel32_ix_, streams->stream(kStreamRel32Indexes)))
    return false;

  // Check that streams have been completely consumed.
  for (int i = 0; i < kStreamLimit; ++i) {
    if (streams->stream(i)->Remaining() > 0)
      return false;
  }

  return true;
}
// Safe, non-throwing version of std::vector::at(). Returns 'true' for success,
// 'false' for out-of-bounds index error.
template<typename V, typename T>
bool VectorAt(const V& v, size_t index, T* output) {
  if (index >= v.size())
    return false;
  *output = v[index];
  return true;
}
CheckBool EncodedProgram::EvaluateRel32ARM(OP op,
                                           size_t& ix_rel32_ix,
                                           RVA& current_rva,
                                           SinkStream* output) {
  switch (op & 0x0000F000) {
    case REL32ARM8: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF8,
                                            static_cast<uint16>(op),
                                            static_cast<uint32>(rva -
                                                                current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = static_cast<uint16>(decompressed_op);
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM11: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF11, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint16 op16 = static_cast<uint16>(decompressed_op);
      if (!output->Write(&op16, 2))
        return false;
      current_rva += 2;
      break;
    }
    case REL32ARM24: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF24, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      if (!output->Write(&decompressed_op, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM25: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF25, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    case REL32ARM21: {
      uint32 index;
      if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
        return false;
      ++ix_rel32_ix;
      RVA rva;
      if (!VectorAt(rel32_rva_, index, &rva))
        return false;
      uint32 decompressed_op;
      if (!DisassemblerElf32ARM::Decompress(ARM_OFF21, (uint16) op,
                                            (uint32) (rva - current_rva),
                                            &decompressed_op)) {
        return false;
      }
      uint32 words = (decompressed_op << 16) | (decompressed_op >> 16);
      if (!output->Write(&words, 4))
        return false;
      current_rva += 4;
      break;
    }
    default:
      return false;
  }

  return true;
}
CheckBool EncodedProgram::AssembleTo(SinkStream* final_buffer) {
  // For the most part, the assembly process walks the various tables.
  // ix_mumble is the index into the mumble table.
  size_t ix_origins = 0;
  size_t ix_copy_counts = 0;
  size_t ix_copy_bytes = 0;
  size_t ix_abs32_ix = 0;
  size_t ix_rel32_ix = 0;

  RVA current_rva = 0;

  bool pending_pe_relocation_table = false;
  uint8 pending_pe_relocation_table_type = 0x03;  // IMAGE_REL_BASED_HIGHLOW
  Elf32_Word pending_elf_relocation_table_type = 0;
  SinkStream bytes_following_relocation_table;

  SinkStream* output = final_buffer;
  for (size_t ix_ops = 0; ix_ops < ops_.size(); ++ix_ops) {
    OP op = ops_[ix_ops];

    switch (op) {
      default:
        if (!EvaluateRel32ARM(op, ix_rel32_ix, current_rva, output))
          return false;
        break;
      case ORIGIN: {
        RVA section_rva;
        if (!VectorAt(origins_, ix_origins, &section_rva))
          return false;
        ++ix_origins;
        current_rva = section_rva;
        break;
      }
      case COPY: {
        size_t count;
        if (!VectorAt(copy_counts_, ix_copy_counts, &count))
          return false;
        ++ix_copy_counts;
        for (size_t i = 0; i < count; ++i) {
          uint8 b;
          if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
            return false;
          ++ix_copy_bytes;
          if (!output->Write(&b, 1))
            return false;
        }
        current_rva += static_cast<RVA>(count);
        break;
      }

      case COPY1: {
        uint8 b;
        if (!VectorAt(copy_bytes_, ix_copy_bytes, &b))
          return false;
        ++ix_copy_bytes;
        if (!output->Write(&b, 1))
          return false;
        current_rva += 1;
        break;
      }
      case REL32: {
        uint32 index;
        if (!VectorAt(rel32_ix_, ix_rel32_ix, &index))
          return false;
        ++ix_rel32_ix;
        RVA rva;
        if (!VectorAt(rel32_rva_, index, &rva))
          return false;
        // The stored operand is relative to the end of the 4-byte field, hence
        // the +4 adjustment.
        uint32 offset = (rva - (current_rva + 4));
        if (!output->Write(&offset, 4))
          return false;
        current_rva += 4;
        break;
      }
      case ABS32: {
        uint32 index;
        if (!VectorAt(abs32_ix_, ix_abs32_ix, &index))
          return false;
        ++ix_abs32_ix;
        RVA rva;
        if (!VectorAt(abs32_rva_, index, &rva))
          return false;
        // Emit the absolute address and remember this location so it can be
        // listed in the base relocation table generated at the end.
        uint32 abs32 = static_cast<uint32>(rva + image_base_);
        if (!abs32_relocs_.push_back(current_rva) || !output->Write(&abs32, 4))
          return false;
        current_rva += 4;
        break;
      }
      case MAKE_PE_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        output = &bytes_following_relocation_table;
        break;
        // There is a potential problem *if* the instruction stream contains
        // some REL32 relocations following the base relocation and in the same
        // section. We don't know the size of the table, so 'current_rva' will
        // be wrong, causing REL32 offsets to be miscalculated. This never
        // happens; the base relocation table is usually in a section of its
        // own, a data-only section, and following everything else in the
        // executable except some padding zero bytes. We could fix this by
        // emitting an ORIGIN after the MAKE_BASE_RELOCATION_TABLE.
      }

      case MAKE_PE64_RELOCATION_TABLE: {
        if (pending_pe_relocation_table)
          return false;  // Can't have two base relocation tables.

        pending_pe_relocation_table = true;
        pending_pe_relocation_table_type = 0x0A;  // IMAGE_REL_BASED_DIR64
        output = &bytes_following_relocation_table;
        break;
      }
      case MAKE_ELF_ARM_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_ARM_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }

      case MAKE_ELF_RELOCATION_TABLE: {
        // We can see the base relocation anywhere, but we only have the
        // information to generate it at the very end. So we divert the bytes
        // we are generating to a temporary stream.
        if (pending_elf_relocation_table_type)
          return false;  // Can't have two base relocation tables.

        pending_elf_relocation_table_type = R_386_RELATIVE;
        output = &bytes_following_relocation_table;
        break;
      }
    }
  }
  if (pending_pe_relocation_table) {
    if (!GeneratePeRelocations(final_buffer,
                               pending_pe_relocation_table_type) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  if (pending_elf_relocation_table_type) {
    if (!GenerateElfRelocations(pending_elf_relocation_table_type,
                                final_buffer) ||
        !final_buffer->Append(&bytes_following_relocation_table))
      return false;
  }

  // Final verification check: did we consume all lists?
  if (ix_copy_counts != copy_counts_.size())
    return false;
  if (ix_copy_bytes != copy_bytes_.size())
    return false;
  if (ix_abs32_ix != abs32_ix_.size())
    return false;
  if (ix_rel32_ix != rel32_ix_.size())
    return false;

  return true;
}
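// (Illustrative: the op sequence ORIGIN 0x1000, COPY1 0x55, REL32 #0 sets
// current_rva to 0x1000, emits the byte 0x55, then emits the 4-byte value
// rel32_rva_[0] - 0x1005, leaving current_rva at 0x1005.)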
// RelocBlock has the layout of a block of relocations in the base relocation
// table file format.

struct RelocBlockPOD {
  uint32 page_rva;
  uint32 block_size;
  uint16 relocs[4096];  // Allow up to one relocation per byte of a 4k page.
};

COMPILE_ASSERT(offsetof(RelocBlockPOD, relocs) == 8, reloc_block_header_size);

class RelocBlock {
 public:
  RelocBlock() {
    pod.page_rva = 0xFFFFFFFF;
    pod.block_size = 8;
  }

  void Add(uint16 item) {
    pod.relocs[(pod.block_size - 8) / 2] = item;
    pod.block_size += 2;
  }

  CheckBool Flush(SinkStream* buffer) WARN_UNUSED_RESULT {
    bool ok = true;
    if (pod.block_size != 8) {
      if (pod.block_size % 4 != 0) {  // Pad to make size multiple of 4 bytes.
        Add(0);
      }
      ok = buffer->Write(&pod, pod.block_size);
      pod.block_size = 8;
    }
    return ok;
  }

  RelocBlockPOD pod;
};
CheckBool EncodedProgram::GeneratePeRelocations(SinkStream* buffer,
                                                uint8 type) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  RelocBlock block;

  bool ok = true;
  for (size_t i = 0; ok && i < abs32_relocs_.size(); ++i) {
    uint32 rva = abs32_relocs_[i];
    uint32 page_rva = rva & ~0xFFF;
    if (page_rva != block.pod.page_rva) {
      ok &= block.Flush(buffer);
      block.pod.page_rva = page_rva;
    }
    if (ok)
      block.Add(((static_cast<uint16>(type)) << 12) | (rva & 0xFFF));
  }
  ok &= block.Flush(buffer);
  return ok;
}
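// (Illustrative: abs32 locations at RVAs 0x4030 and 0x4100 with type 0x03
// produce a single block {page_rva = 0x4000, block_size = 12} whose entries
// are 0x3030 and 0x3100, i.e. the type in the top 4 bits and the 12-bit page
// offset in the rest, matching the PE base relocation format.)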
CheckBool EncodedProgram::GenerateElfRelocations(Elf32_Word r_info,
                                                 SinkStream* buffer) {
  std::sort(abs32_relocs_.begin(), abs32_relocs_.end());

  Elf32_Rel relocation_block;

  relocation_block.r_info = r_info;

  bool ok = true;
  for (size_t i = 0; ok && i < abs32_relocs_.size(); ++i) {
    relocation_block.r_offset = abs32_relocs_[i];
    ok = buffer->Write(&relocation_block, sizeof(Elf32_Rel));
  }

  return ok;
}
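// (Each emitted entry is an 8-byte Elf32_Rel {r_offset, r_info}; with
// R_386_RELATIVE or R_ARM_RELATIVE the dynamic loader adds the load bias to
// the 32-bit value already stored at r_offset, which is why only the offsets
// need to be recorded here.)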
////////////////////////////////////////////////////////////////////////////////

Status WriteEncodedProgram(EncodedProgram* encoded, SinkStreamSet* sink) {
  if (!encoded->WriteTo(sink))
    return C_STREAM_ERROR;
  return C_OK;
}

Status ReadEncodedProgram(SourceStreamSet* streams, EncodedProgram** output) {
  EncodedProgram* encoded = new EncodedProgram();
  if (encoded->ReadFrom(streams)) {
    *output = encoded;
    return C_OK;
  }
  delete encoded;
  return C_DESERIALIZATION_FAILED;
}
Status Assemble(EncodedProgram* encoded, SinkStream* buffer) {
  bool assembled = encoded->AssembleTo(buffer);
  if (assembled)
    return C_OK;
  return C_ASSEMBLY_FAILED;
}

void DeleteEncodedProgram(EncodedProgram* encoded) {
  delete encoded;
}

}  // end namespace