libgpt: random UUID conforms to RFC (thx Agnieszka Tabaka)
[helenos.git] / uspace / lib / gpt / libgpt.c
blob 6a36f33a4c914cfb8ddb7ac2d5f113f485ca4d65
/*
 * Copyright (c) 2011-2013 Dominik Taborsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup libgpt
 * @{
 */

/** @file
 */
/* TODO:
 * The implementation currently supports fixed size partition entries only.
 * The specification requires otherwise, though.
 */
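
/*
 * Typical read flow (illustrative sketch only, not part of the library API;
 * dev_handle stands for a caller-supplied service_id_t):
 *
 *	gpt_label_t *label = gpt_alloc_label();
 *	if (label == NULL)
 *		return ENOMEM;
 *
 *	int rc = gpt_read_header(label, dev_handle);
 *	if (rc == EOK)
 *		rc = gpt_read_partitions(label);
 *
 *	gpt_free_label(label);
 */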
#include <ipc/bd.h>
#include <async.h>
#include <stdio.h>
#include <block.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <byteorder.h>
#include <adt/checksum.h>
#include <mem.h>
#include <sys/typefmt.h>
#include <mbr.h>
#include <align.h>
#include "libgpt.h"
static int load_and_check_header(service_id_t, aoff64_t, size_t, gpt_header_t *);
static gpt_partitions_t *alloc_part_array(uint32_t);
static int extend_part_array(gpt_partitions_t *);
static int reduce_part_array(gpt_partitions_t *);
static uint8_t get_byte(const char *);
static bool check_overlap(gpt_part_t *, gpt_part_t *);
static bool check_encaps(gpt_part_t *, uint64_t, uint64_t);
/** Allocate a GPT label */
gpt_label_t *gpt_alloc_label(void)
{
	gpt_label_t *label = malloc(sizeof(gpt_label_t));
	if (label == NULL)
		return NULL;

	label->parts = gpt_alloc_partitions();
	if (label->parts == NULL) {
		free(label);
		return NULL;
	}

	label->gpt = NULL;
	label->device = 0;

	return label;
}
/** Free a GPT label */
void gpt_free_label(gpt_label_t *label)
{
	if (label->gpt != NULL)
		gpt_free_gpt(label->gpt);

	if (label->parts != NULL)
		gpt_free_partitions(label->parts);

	free(label);
}
/** Allocate a GPT header */
gpt_t *gpt_alloc_header(size_t size)
{
	gpt_t *gpt = malloc(sizeof(gpt_t));
	if (gpt == NULL)
		return NULL;

	/*
	 * We might need only sizeof(gpt_header_t), but to follow the
	 * specification the rest of the block has to be zeroed as well.
	 */
	size_t final_size = max(size, sizeof(gpt_header_t));
	gpt->header = malloc(final_size);
	if (gpt->header == NULL) {
		free(gpt);
		return NULL;
	}

	memset(gpt->header, 0, final_size);
	memcpy(gpt->header->efi_signature, efi_signature, 8);
	memcpy(gpt->header->revision, revision, 4);
	gpt->header->header_size = host2uint32_t_le(final_size);
	gpt->header->entry_lba = host2uint64_t_le((uint64_t) 2);
	gpt->header->entry_size = host2uint32_t_le(sizeof(gpt_entry_t));

	return gpt;
}
/** Free a GPT header */
void gpt_free_gpt(gpt_t *gpt)
{
	free(gpt->header);
	free(gpt);
}
/** Read GPT from a device
 *
 * @param label      Label to read.
 * @param dev_handle Device to read GPT from.
 *
 * @return EOK on success, error code on error.
 *
 */
int gpt_read_header(gpt_label_t *label, service_id_t dev_handle)
{
	int rc = block_init(EXCHANGE_ATOMIC, dev_handle, 512);
	if (rc != EOK)
		return rc;

	size_t block_size;
	rc = block_get_bsize(dev_handle, &block_size);
	if (rc != EOK)
		goto end;

	if (label->gpt == NULL) {
		label->gpt = gpt_alloc_header(block_size);
		if (label->gpt == NULL) {
			rc = ENOMEM;
			goto end;
		}
	}

	rc = load_and_check_header(dev_handle, GPT_HDR_BA, block_size,
	    label->gpt->header);
	if ((rc == EBADCHECKSUM) || (rc == EINVAL)) {
		aoff64_t blocks;
		rc = block_get_nblocks(dev_handle, &blocks);
		if (rc != EOK) {
			gpt_free_gpt(label->gpt);
			goto end;
		}

		rc = load_and_check_header(dev_handle, blocks - 1, block_size,
		    label->gpt->header);
		if ((rc == EBADCHECKSUM) || (rc == EINVAL)) {
			gpt_free_gpt(label->gpt);
			goto end;
		}
	}

	label->device = dev_handle;
	rc = EOK;

end:
	block_fini(dev_handle);
	return rc;
}
/** Write GPT header to device
 *
 * @param label      Label to be written.
 * @param dev_handle Device to write the GPT to.
 *
 * @return EOK on success, libblock error code otherwise.
 *
 */
int gpt_write_header(gpt_label_t *label, service_id_t dev_handle)
{
	/* The comm_size argument (the last one) is ignored */
	int rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
	if ((rc != EOK) && (rc != EEXIST))
		return rc;

	size_t block_size;
	rc = block_get_bsize(dev_handle, &block_size);
	if (rc != EOK)
		goto end;

	aoff64_t blocks;
	rc = block_get_nblocks(dev_handle, &blocks);
	if (rc != EOK)
		goto end;

	gpt_set_random_uuid(label->gpt->header->disk_guid);

	/* Prepare the backup header */
	label->gpt->header->alternate_lba = label->gpt->header->current_lba;
	label->gpt->header->current_lba = host2uint64_t_le(blocks - 1);

	uint64_t lba = label->gpt->header->entry_lba;
	label->gpt->header->entry_lba = host2uint64_t_le(blocks -
	    (uint32_t_le2host(label->gpt->header->fillries) *
	    sizeof(gpt_entry_t)) / block_size - 1);

	label->gpt->header->header_crc32 = 0;
	label->gpt->header->header_crc32 =
	    host2uint32_t_le(compute_crc32((uint8_t *) label->gpt->header,
	    uint32_t_le2host(label->gpt->header->header_size)));

	/* Write to backup GPT header location */
	rc = block_write_direct(dev_handle, blocks - 1, GPT_HDR_BS,
	    label->gpt->header);
	if (rc != EOK)
		goto end;

	/* Prepare the main header */
	label->gpt->header->entry_lba = lba;

	lba = label->gpt->header->alternate_lba;
	label->gpt->header->alternate_lba = label->gpt->header->current_lba;
	label->gpt->header->current_lba = lba;

	label->gpt->header->header_crc32 = 0;
	label->gpt->header->header_crc32 =
	    host2uint32_t_le(compute_crc32((uint8_t *) label->gpt->header,
	    uint32_t_le2host(label->gpt->header->header_size)));

	/* Write to main GPT header location */
	rc = block_write_direct(dev_handle, GPT_HDR_BA, GPT_HDR_BS,
	    label->gpt->header);
	if (rc != EOK)
		goto end;

	/* Write Protective MBR */
	br_block_t mbr;
	memset(&mbr, 0, 512);

	memset(mbr.pte[0].first_chs, 1, 3);
	mbr.pte[0].ptype = 0xEE;
	memset(mbr.pte[0].last_chs, 0xff, 3);
	mbr.pte[0].first_lba = host2uint32_t_le(1);
	mbr.pte[0].length = 0xffffffff;
	mbr.signature = host2uint16_t_le(BR_SIGNATURE);

	rc = block_write_direct(dev_handle, 0, 1, &mbr);

end:
	block_fini(dev_handle);
	return rc;
}
/** Alloc partition array */
gpt_partitions_t *gpt_alloc_partitions(void)
{
	return alloc_part_array(GPT_MIN_PART_NUM);
}
/** Parse partitions from GPT
 *
 * @param label GPT label to be parsed.
 *
 * @return EOK on success, error code otherwise.
 *
 */
int gpt_read_partitions(gpt_label_t *label)
{
	uint32_t fillries = uint32_t_le2host(label->gpt->header->fillries);
	uint32_t ent_size = uint32_t_le2host(label->gpt->header->entry_size);
	uint64_t ent_lba = uint64_t_le2host(label->gpt->header->entry_lba);

	if (label->parts == NULL) {
		label->parts = alloc_part_array(fillries);
		if (label->parts == NULL)
			return ENOMEM;
	}

	int rc = block_init(EXCHANGE_SERIALIZE, label->device,
	    sizeof(gpt_entry_t));
	if (rc != EOK) {
		gpt_free_partitions(label->parts);
		label->parts = NULL;
		goto end;
	}

	size_t block_size;
	rc = block_get_bsize(label->device, &block_size);
	if (rc != EOK) {
		gpt_free_partitions(label->parts);
		label->parts = NULL;
		goto end;
	}

	aoff64_t pos = ent_lba * block_size;

	for (uint32_t i = 0; i < fillries; i++) {
		rc = block_read_bytes_direct(label->device, pos, sizeof(gpt_entry_t),
		    label->parts->part_array + i);
		pos += ent_size;

		if (rc != EOK) {
			gpt_free_partitions(label->parts);
			label->parts = NULL;
			goto end;
		}
	}

	uint32_t crc = compute_crc32((uint8_t *) label->parts->part_array,
	    fillries * ent_size);

	if (uint32_t_le2host(label->gpt->header->pe_array_crc32) != crc) {
		rc = EBADCHECKSUM;
		gpt_free_partitions(label->parts);
		label->parts = NULL;
		goto end;
	}

	rc = EOK;

end:
	block_fini(label->device);
	return rc;
}
/** Write GPT and partitions to device
 *
 * Note: Also writes the header.
 *
 * @param label      Label to write.
 * @param dev_handle Device to write the data to.
 *
 * @return EOK on success, error code otherwise
 *
 */
int gpt_write_partitions(gpt_label_t *label, service_id_t dev_handle)
{
	/* comm_size of 4096 is ignored */
	int rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
	if ((rc != EOK) && (rc != EEXIST))
		return rc;

	size_t block_size;
	rc = block_get_bsize(dev_handle, &block_size);
	if (rc != EOK)
		goto fail;

	aoff64_t blocks;
	rc = block_get_nblocks(dev_handle, &blocks);
	if (rc != EOK)
		goto fail;

	if (label->gpt == NULL) {
		label->gpt = gpt_alloc_header(block_size);
		if (label->gpt == NULL) {
			rc = ENOMEM;
			goto fail;
		}
	}

	uint32_t entry_size =
	    uint32_t_le2host(label->gpt->header->entry_size);
	size_t fillries = (label->parts->fill > GPT_MIN_PART_NUM) ?
	    label->parts->fill : GPT_MIN_PART_NUM;

	if (entry_size != sizeof(gpt_entry_t)) {
		rc = ENOTSUP;
		goto fail;
	}

	label->gpt->header->fillries = host2uint32_t_le(fillries);

	uint64_t arr_blocks = (fillries * sizeof(gpt_entry_t)) / block_size;

	/* Include Protective MBR */
	uint64_t gpt_space = arr_blocks + GPT_HDR_BS + 1;

	label->gpt->header->first_usable_lba = host2uint64_t_le(gpt_space);
	label->gpt->header->last_usable_lba =
	    host2uint64_t_le(blocks - gpt_space - 1);

	/* Perform checks */
	gpt_part_foreach (label, p) {
		if (gpt_get_part_type(p) == GPT_PTE_UNUSED)
			continue;

		if (!check_encaps(p, blocks, gpt_space)) {
			rc = ERANGE;
			goto fail;
		}

		gpt_part_foreach (label, q) {
			if (p == q)
				continue;

			if (gpt_get_part_type(p) != GPT_PTE_UNUSED) {
				if (check_overlap(p, q)) {
					rc = ERANGE;
					goto fail;
				}
			}
		}
	}

	label->gpt->header->pe_array_crc32 =
	    host2uint32_t_le(compute_crc32((uint8_t *) label->parts->part_array,
	    fillries * entry_size));

	/* Write to backup GPT partition array location */
	rc = block_write_direct(dev_handle, blocks - arr_blocks - 1,
	    arr_blocks, label->parts->part_array);
	if (rc != EOK)
		goto fail;

	/* Write to main GPT partition array location */
	rc = block_write_direct(dev_handle,
	    uint64_t_le2host(label->gpt->header->entry_lba),
	    arr_blocks, label->parts->part_array);
	if (rc != EOK)
		goto fail;

	return gpt_write_header(label, dev_handle);

fail:
	block_fini(dev_handle);
	return rc;
}
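
/*
 * Typical write flow (illustrative sketch only): fill in a partition of an
 * existing label and write everything out. The label, dev_handle, type_idx
 * and LBA values below are assumptions made for the example.
 *
 *	gpt_part_t *part = gpt_get_partition(label);
 *	if (part == NULL)
 *		return ENOMEM;
 *
 *	gpt_set_part_type(part, type_idx);
 *	gpt_set_start_lba(part, first_lba);
 *	gpt_set_end_lba(part, last_lba);
 *	gpt_set_part_name(part, "example", 7);
 *
 *	rc = gpt_write_partitions(label, dev_handle);
 *
 * Note that gpt_write_partitions() also writes both headers and the
 * protective MBR through gpt_write_header().
 */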
/** Allocate a new partition
 *
 * Note: Use either gpt_alloc_partition() or gpt_get_partition().
 * This returns a memory block (zero-filled) and needs gpt_add_partition()
 * to be called to insert it into a partition array.
 * Requires you to call gpt_free_partition() afterwards.
 *
 * @return Pointer to the new partition or NULL.
 *
 */
gpt_part_t *gpt_alloc_partition(void)
{
	gpt_part_t *partition = malloc(sizeof(gpt_part_t));
	if (partition == NULL)
		return NULL;

	memset(partition, 0, sizeof(gpt_part_t));

	return partition;
}
/** Allocate a new partition already inside the label
 *
 * Note: Use either gpt_alloc_partition() or gpt_get_partition().
 * This one returns a pointer to the first empty structure already
 * inside the array, so don't call gpt_add_partition() afterwards.
 * This is the one you will usually want.
 *
 * @param label Label to carry the new partition.
 *
 * @return Pointer to the new partition or NULL.
 *
 */
gpt_part_t *gpt_get_partition(gpt_label_t *label)
{
	gpt_part_t *partition;

	/* Find the first empty entry */
	do {
		if (label->parts->fill == label->parts->arr_size) {
			if (extend_part_array(label->parts) != EOK)
				return NULL;
		}

		partition = label->parts->part_array + label->parts->fill++;
	} while (gpt_get_part_type(partition) != GPT_PTE_UNUSED);

	return partition;
}
/** Get partition already inside the label
 *
 * Note: For new partitions use either gpt_alloc_partition() or
 * gpt_get_partition() unless you want a partition at a specific place.
 * This returns a pointer to a structure already inside the array,
 * so don't call gpt_add_partition() afterwards.
 * This function is handy when you want to change an already existing
 * partition or to simply write somewhere in the middle. This works only
 * for indexes smaller than either 128 or the actual number of filled
 * entries.
 *
 * @param label Label carrying the partition.
 * @param idx   Index of the partition.
 *
 * @return Pointer to the partition or NULL when out of range.
 *
 */
gpt_part_t *gpt_get_partition_at(gpt_label_t *label, size_t idx)
{
	if ((idx >= GPT_MIN_PART_NUM) && (idx >= label->parts->fill))
		return NULL;

	return label->parts->part_array + idx;
}
/** Copy partition into partition array
 *
 * Note: For use with gpt_alloc_partition() only. You will get
 * duplicates with gpt_get_partition().
 * Note: Does not call gpt_free_partition()!
 *
 * @param label     Target label
 * @param partition Source partition to copy
 *
 * @return EOK on success, error code otherwise
 *
 */
int gpt_add_partition(gpt_label_t *label, gpt_part_t *partition)
{
	/* Find the first empty entry */
	gpt_part_t *part;

	do {
		if (label->parts->fill == label->parts->arr_size) {
			if (extend_part_array(label->parts) != EOK)
				return ENOMEM;
		}

		part = label->parts->part_array + label->parts->fill++;
	} while (gpt_get_part_type(part) != GPT_PTE_UNUSED);

	memcpy(part, partition, sizeof(gpt_entry_t));
	return EOK;
}
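
/*
 * The two insertion styles side by side (illustrative sketch only; label,
 * start and end are placeholders, not part of the library):
 *
 * (a) A standalone entry, copied into the array by gpt_add_partition():
 *
 *	gpt_part_t *p = gpt_alloc_partition();
 *	gpt_set_start_lba(p, start);
 *	gpt_set_end_lba(p, end);
 *	rc = gpt_add_partition(label, p);
 *	gpt_free_partition(p);
 *
 * (b) An entry living directly in the array; gpt_add_partition() must not
 *     be called afterwards:
 *
 *	gpt_part_t *q = gpt_get_partition(label);
 *	gpt_set_start_lba(q, start);
 *	gpt_set_end_lba(q, end);
 */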
/** Remove partition from array
 *
 * Note: Even if this function fails, the partition still gets removed;
 * only reducing the array failed.
 *
 * @param label Label to remove from
 * @param idx   Index of the partition to remove
 *
 * @return EOK on success, ENOMEM on array reduction failure
 *
 */
int gpt_remove_partition(gpt_label_t *label, size_t idx)
{
	if (idx >= label->parts->arr_size)
		return EINVAL;

	/*
	 * FIXME:
	 * If we allow blank spots, we break the array. If we have more than
	 * 128 partitions in the array and then remove something from
	 * the first 128 partitions, we would forget to write the last one.
	 */
	memset(label->parts->part_array + idx, 0, sizeof(gpt_entry_t));

	if (label->parts->fill > idx)
		label->parts->fill = idx;

	gpt_part_t *partition;

	if ((label->parts->fill > GPT_MIN_PART_NUM) &&
	    (label->parts->fill < (label->parts->arr_size / 2) -
	    GPT_IGNORE_FILL_NUM)) {
		for (partition = gpt_get_partition_at(label, label->parts->arr_size / 2);
		    partition < label->parts->part_array + label->parts->arr_size;
		    partition++) {
			if (gpt_get_part_type(partition) != GPT_PTE_UNUSED)
				return EOK;
		}

		if (reduce_part_array(label->parts) == ENOMEM)
			return ENOMEM;
	}

	return EOK;
}
/** Free partition list
 *
 * @param parts Partition list to be freed
 *
 */
void gpt_free_partitions(gpt_partitions_t *parts)
{
	free(parts->part_array);
	free(parts);
}
/** Get partition type */
size_t gpt_get_part_type(gpt_part_t *partition)
{
	size_t i;

	/* The first three GUID groups are stored byte-swapped (little-endian) */
	for (i = 0; gpt_ptypes[i].guid != NULL; i++) {
		if ((partition->part_type[3] == get_byte(gpt_ptypes[i].guid + 0)) &&
		    (partition->part_type[2] == get_byte(gpt_ptypes[i].guid + 2)) &&
		    (partition->part_type[1] == get_byte(gpt_ptypes[i].guid + 4)) &&
		    (partition->part_type[0] == get_byte(gpt_ptypes[i].guid + 6)) &&
		    (partition->part_type[5] == get_byte(gpt_ptypes[i].guid + 8)) &&
		    (partition->part_type[4] == get_byte(gpt_ptypes[i].guid + 10)) &&
		    (partition->part_type[7] == get_byte(gpt_ptypes[i].guid + 12)) &&
		    (partition->part_type[6] == get_byte(gpt_ptypes[i].guid + 14)) &&
		    (partition->part_type[8] == get_byte(gpt_ptypes[i].guid + 16)) &&
		    (partition->part_type[9] == get_byte(gpt_ptypes[i].guid + 18)) &&
		    (partition->part_type[10] == get_byte(gpt_ptypes[i].guid + 20)) &&
		    (partition->part_type[11] == get_byte(gpt_ptypes[i].guid + 22)) &&
		    (partition->part_type[12] == get_byte(gpt_ptypes[i].guid + 24)) &&
		    (partition->part_type[13] == get_byte(gpt_ptypes[i].guid + 26)) &&
		    (partition->part_type[14] == get_byte(gpt_ptypes[i].guid + 28)) &&
		    (partition->part_type[15] == get_byte(gpt_ptypes[i].guid + 30)))
			return i;
	}

	return i;
}
/** Set partition type */
void gpt_set_part_type(gpt_part_t *partition, size_t type)
{
	/* Beware: first 3 blocks are byteswapped! */
	partition->part_type[3] = get_byte(gpt_ptypes[type].guid + 0);
	partition->part_type[2] = get_byte(gpt_ptypes[type].guid + 2);
	partition->part_type[1] = get_byte(gpt_ptypes[type].guid + 4);
	partition->part_type[0] = get_byte(gpt_ptypes[type].guid + 6);

	partition->part_type[5] = get_byte(gpt_ptypes[type].guid + 8);
	partition->part_type[4] = get_byte(gpt_ptypes[type].guid + 10);

	partition->part_type[7] = get_byte(gpt_ptypes[type].guid + 12);
	partition->part_type[6] = get_byte(gpt_ptypes[type].guid + 14);

	partition->part_type[8] = get_byte(gpt_ptypes[type].guid + 16);
	partition->part_type[9] = get_byte(gpt_ptypes[type].guid + 18);
	partition->part_type[10] = get_byte(gpt_ptypes[type].guid + 20);
	partition->part_type[11] = get_byte(gpt_ptypes[type].guid + 22);
	partition->part_type[12] = get_byte(gpt_ptypes[type].guid + 24);
	partition->part_type[13] = get_byte(gpt_ptypes[type].guid + 26);
	partition->part_type[14] = get_byte(gpt_ptypes[type].guid + 28);
	partition->part_type[15] = get_byte(gpt_ptypes[type].guid + 30);
}
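
/*
 * A well-known illustration of the byte ordering above (given only as an
 * example, without claiming it appears in gpt_ptypes): the EFI System
 * Partition type GUID C12A7328-F81F-11D2-BA4B-00A0C93EC93B is stored on
 * disk as the byte sequence 28 73 2A C1 1F F8 D2 11 BA 4B 00 A0 C9 3E C9 3B,
 * i.e. the first three groups are little-endian while the rest keeps its
 * textual order.
 */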
/** Get partition starting LBA */
uint64_t gpt_get_start_lba(gpt_part_t *partition)
{
	return uint64_t_le2host(partition->start_lba);
}

/** Set partition starting LBA */
void gpt_set_start_lba(gpt_part_t *partition, uint64_t start)
{
	partition->start_lba = host2uint64_t_le(start);
}

/** Get partition ending LBA */
uint64_t gpt_get_end_lba(gpt_part_t *partition)
{
	return uint64_t_le2host(partition->end_lba);
}

/** Set partition ending LBA */
void gpt_set_end_lba(gpt_part_t *partition, uint64_t end)
{
	partition->end_lba = host2uint64_t_le(end);
}
/** Get partition name */
unsigned char *gpt_get_part_name(gpt_part_t *partition)
{
	return partition->part_name;
}

/** Copy partition name */
void gpt_set_part_name(gpt_part_t *partition, char *name, size_t length)
{
	if (length >= 72)
		length = 71;

	memcpy(partition->part_name, name, length);
	partition->part_name[length] = '\0';
}
/** Get partition attribute */
bool gpt_get_flag(gpt_part_t *partition, gpt_attr_t flag)
{
	return (partition->attributes & (((uint64_t) 1) << flag)) ? 1 : 0;
}

/** Set partition attribute */
void gpt_set_flag(gpt_part_t *partition, gpt_attr_t flag, bool value)
{
	uint64_t attr = partition->attributes;

	if (value)
		attr = attr | (((uint64_t) 1) << flag);
	else
		attr = attr ^ (attr & (((uint64_t) 1) << flag));

	partition->attributes = attr;
}
/** Generate a new pseudo-random UUID compliant with RFC 4122 */
void gpt_set_random_uuid(uint8_t *uuid)
{
	srandom((unsigned int) (size_t) uuid);

	for (size_t i = 0; i < 16; i++)
		uuid[i] = random();

	/*
	 * Set version (stored in bits 4-7 of seventh byte) to 4 (random
	 * UUID) and bits 6 and 7 of ninth byte to 0 and 1 respectively -
	 * according to RFC 4122, section 4.4.
	 */
	uuid[6] &= 0x0f;
	uuid[6] |= (0x4 << 4);
	uuid[8] &= 0x3f;
	uuid[8] |= (1 << 7);
}
/** Get next aligned address */
uint64_t gpt_get_next_aligned(uint64_t addr, unsigned int alignment)
{
	return ALIGN_UP(addr + 1, alignment);
}
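
/*
 * Worked example (assuming, for illustration, 512-byte blocks and 1 MiB
 * alignment, i.e. 2048 blocks): for a previous partition ending at LBA 2047,
 * gpt_get_next_aligned(2047, 2048) == 2048; for one ending at LBA 2048,
 * gpt_get_next_aligned(2048, 2048) == 4096.
 */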
static int load_and_check_header(service_id_t dev_handle, aoff64_t addr,
    size_t block_size, gpt_header_t *header)
{
	int rc = block_read_direct(dev_handle, addr, GPT_HDR_BS, header);
	if (rc != EOK)
		return rc;

	/* Check the EFI signature */
	for (unsigned int i = 0; i < 8; i++) {
		if (header->efi_signature[i] != efi_signature[i])
			return EINVAL;
	}

	/* Check the CRC32 of the header */
	uint32_t crc = header->header_crc32;
	header->header_crc32 = 0;

	if (crc != compute_crc32((uint8_t *) header, header->header_size))
		return EBADCHECKSUM;
	else
		header->header_crc32 = crc;

	/* Check for zeroes in the rest of the block */
	for (size_t i = sizeof(gpt_header_t); i < block_size; i++) {
		if (((uint8_t *) header)[i] != 0)
			return EINVAL;
	}

	return EOK;
}
static gpt_partitions_t *alloc_part_array(uint32_t num)
{
	gpt_partitions_t *res = malloc(sizeof(gpt_partitions_t));
	if (res == NULL)
		return NULL;

	uint32_t size = num > GPT_BASE_PART_NUM ? num : GPT_BASE_PART_NUM;
	res->part_array = malloc(size * sizeof(gpt_entry_t));
	if (res->part_array == NULL) {
		free(res);
		return NULL;
	}

	memset(res->part_array, 0, size * sizeof(gpt_entry_t));

	res->fill = 0;
	res->arr_size = num;

	return res;
}
static int extend_part_array(gpt_partitions_t *partition)
{
	size_t nsize = partition->arr_size * 2;
	gpt_entry_t *entry = malloc(nsize * sizeof(gpt_entry_t));
	if (entry == NULL)
		return ENOMEM;

	/* Zero the whole new array so the unused tail reads as empty entries */
	memset(entry, 0, nsize * sizeof(gpt_entry_t));
	memcpy(entry, partition->part_array, partition->fill *
	    sizeof(gpt_entry_t));
	free(partition->part_array);

	partition->part_array = entry;
	partition->arr_size = nsize;

	return EOK;
}
static int reduce_part_array(gpt_partitions_t *partition)
{
	if (partition->arr_size > GPT_MIN_PART_NUM) {
		unsigned int nsize = partition->arr_size / 2;
		nsize = nsize > GPT_MIN_PART_NUM ? nsize : GPT_MIN_PART_NUM;

		gpt_entry_t *entry = malloc(nsize * sizeof(gpt_entry_t));
		if (entry == NULL)
			return ENOMEM;

		memcpy(entry, partition->part_array,
		    (partition->fill < nsize ? partition->fill : nsize) *
		    sizeof(gpt_entry_t));
		free(partition->part_array);

		partition->part_array = entry;
		partition->arr_size = nsize;
	}

	return EOK;
}
/* Parse a byte from a string in hexadecimal */
static uint8_t get_byte(const char *c)
{
	uint8_t val = 0;
	char hex[3] = { *c, *(c + 1), 0 };

	str_uint8_t(hex, NULL, 16, false, &val);
	return val;
}
static bool check_overlap(gpt_part_t *part1, gpt_part_t *part2)
{
	if ((gpt_get_start_lba(part1) < gpt_get_start_lba(part2)) &&
	    (gpt_get_end_lba(part1) < gpt_get_start_lba(part2)))
		return false;

	if ((gpt_get_start_lba(part1) > gpt_get_start_lba(part2)) &&
	    (gpt_get_end_lba(part2) < gpt_get_start_lba(part1)))
		return false;

	return true;
}
static bool check_encaps(gpt_part_t *part, uint64_t blocks,
    uint64_t first_lba)
{
	/*
	 * We allow "<=" in the second expression because the area at the
	 * end of the device lacks the protective MBR, so it is smaller
	 * by one block.
	 */
	if ((gpt_get_start_lba(part) >= first_lba) &&
	    (gpt_get_end_lba(part) <= blocks - first_lba))
		return true;

	return false;
}

/** @}
 */