source: mainline/uspace/lib/gpt/libgpt.c @ f4a47e52

Last change on this file since f4a47e52 was f4a47e52, checked in by Dominik Taborsky (AT DOT) <brembyseznamcz>, 12 years ago

Removing debug printfs

  • Property mode set to 100644
File size: 23.1 KB
/*
 * Copyright (c) 2011, 2012, 2013 Dominik Taborsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libgpt
 * @{
 */
/** @file
 */

/* TODO:
 * This implementation only supports fixed-size partition entries, although
 * the specification allows variable sizes. Use a void * array and casting
 * to support that.
 */

#include <ipc/bd.h>
#include <async.h>
#include <stdio.h>
#include <block.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <byteorder.h>
#include <checksum.h>
#include <mem.h>
#include <sys/typefmt.h>
#include <mbr.h>

#include "libgpt.h"

static int load_and_check_header(service_id_t, aoff64_t, size_t, gpt_header_t *);
static gpt_partitions_t * alloc_part_array(uint32_t);
static int extend_part_array(gpt_partitions_t *);
static int reduce_part_array(gpt_partitions_t *);
static uint8_t get_byte(const char *);
static bool check_overlap(gpt_part_t *, gpt_part_t *);
static bool check_encaps(gpt_part_t *, uint64_t, uint64_t);

/** Allocate memory for GPT label */
gpt_label_t * gpt_alloc_label(void)
{
	gpt_label_t *label = malloc(sizeof(gpt_label_t));
	if (label == NULL)
		return NULL;

	/* This is necessary so that gpt_part_foreach does not segfault */
	label->parts = gpt_alloc_partitions();
	if (label->parts == NULL) {
		free(label);
		return NULL;
	}

	label->gpt = NULL;

	label->device = 0;

	return label;
}

/** Free gpt_label_t structure */
void gpt_free_label(gpt_label_t *label)
{
	if (label->gpt != NULL)
		gpt_free_gpt(label->gpt);

	if (label->parts != NULL)
		gpt_free_partitions(label->parts);

	free(label);
}

/** Allocate memory for GPT header */
gpt_t * gpt_alloc_header(size_t size)
{
	gpt_t *gpt = malloc(sizeof(gpt_t));
	if (gpt == NULL)
		return NULL;

	/*
	 * We might need only sizeof(gpt_header_t), but we should follow
	 * the specification and keep the rest of the block zeroed.
	 */
	size_t final_size = size > sizeof(gpt_header_t) ? size : sizeof(gpt_header_t);
	gpt->header = malloc(final_size);
	if (gpt->header == NULL) {
		free(gpt);
		return NULL;
	}

	/* Enter some sane defaults. */
	memset(gpt->header, 0, final_size);
	memcpy(gpt->header->efi_signature, efi_signature, 8);
	memcpy(gpt->header->revision, revision, 4);
	gpt->header->header_size = host2uint32_t_le(final_size);
	gpt->header->entry_lba = host2uint64_t_le((uint64_t) 2);
	gpt->header->entry_size = host2uint32_t_le(sizeof(gpt_entry_t));

	return gpt;
}

/** Free the gpt_t structure, including gpt->header */
void gpt_free_gpt(gpt_t *gpt)
{
	free(gpt->header);
	free(gpt);
}

/** Read GPT from a specific device
 * @param label label structure to fill
 * @param dev_handle device to read the GPT from
 *
 * @return EOK on success, error code otherwise
 */
int gpt_read_header(gpt_label_t *label, service_id_t dev_handle)
{
	int rc;
	size_t b_size;

	rc = block_init(EXCHANGE_ATOMIC, dev_handle, 512);
	if (rc != EOK)
		goto fail;

	rc = block_get_bsize(dev_handle, &b_size);
	if (rc != EOK)
		goto fini_fail;

	if (label->gpt == NULL) {
		label->gpt = gpt_alloc_header(b_size);
		if (label->gpt == NULL) {
			rc = ENOMEM;
			goto fini_fail;
		}
	}

	/* Try the main header first; fall back to the backup header
	 * in the last block if the main one is damaged. */
	rc = load_and_check_header(dev_handle, GPT_HDR_BA, b_size, label->gpt->header);
	if (rc == EBADCHECKSUM || rc == EINVAL) {
		aoff64_t n_blocks;
		rc = block_get_nblocks(dev_handle, &n_blocks);
		if (rc != EOK)
			goto free_fail;

		rc = load_and_check_header(dev_handle, n_blocks - 1, b_size, label->gpt->header);
		if (rc == EBADCHECKSUM || rc == EINVAL)
			goto free_fail;
	}

	label->device = dev_handle;
	block_fini(dev_handle);
	return EOK;

free_fail:
	gpt_free_gpt(label->gpt);
	label->gpt = NULL;
fini_fail:
	block_fini(dev_handle);
fail:
	return rc;
}
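
/*
 * Illustrative usage sketch (not part of the library): reading the header of
 * an existing GPT label. dev_handle is assumed to be a valid service_id_t of
 * a block device obtained from the location service.
 *
 *	gpt_label_t *label = gpt_alloc_label();
 *	if (label == NULL)
 *		return ENOMEM;
 *
 *	int rc = gpt_read_header(label, dev_handle);
 *	if (rc != EOK) {
 *		gpt_free_label(label);
 *		return rc;
 *	}
 */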

/** Write GPT header to device
 * @param label GPT label header to be written
 * @param dev_handle device handle to write the data to
 *
 * @return EOK on success, libblock error code otherwise
 *
 * Note: write the partitions first (if modified), then the GPT header.
 */
int gpt_write_header(gpt_label_t *label, service_id_t dev_handle)
{
	int rc;
	size_t b_size;

	/* The comm_size argument (the last one) is ignored */
	rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
	if (rc != EOK && rc != EEXIST)
		return rc;

	rc = block_get_bsize(dev_handle, &b_size);
	if (rc != EOK) {
		block_fini(dev_handle);
		return rc;
	}

	aoff64_t n_blocks;
	rc = block_get_nblocks(dev_handle, &n_blocks);
	if (rc != EOK) {
		block_fini(dev_handle);
		return rc;
	}

	uint64_t tmp;

	gpt_set_random_uuid(label->gpt->header->disk_guid);

	/* Prepare the backup header */
	label->gpt->header->alternate_lba = label->gpt->header->my_lba;
	label->gpt->header->my_lba = host2uint64_t_le(n_blocks - 1);

	tmp = label->gpt->header->entry_lba;
	label->gpt->header->entry_lba = host2uint64_t_le(n_blocks -
	    (uint32_t_le2host(label->gpt->header->fillries) * sizeof(gpt_entry_t))
	    / b_size - 1);

	label->gpt->header->header_crc32 = 0;
	label->gpt->header->header_crc32 = host2uint32_t_le(
	    compute_crc32((uint8_t *) label->gpt->header,
	    uint32_t_le2host(label->gpt->header->header_size)));

	/* Write to backup GPT header location */
	rc = block_write_direct(dev_handle, n_blocks - 1, GPT_HDR_BS, label->gpt->header);
	if (rc != EOK) {
		block_fini(dev_handle);
		return rc;
	}

	/* Prepare the main header */
	label->gpt->header->entry_lba = tmp;

	tmp = label->gpt->header->alternate_lba;
	label->gpt->header->alternate_lba = label->gpt->header->my_lba;
	label->gpt->header->my_lba = tmp;

	label->gpt->header->header_crc32 = 0;
	label->gpt->header->header_crc32 = host2uint32_t_le(
	    compute_crc32((uint8_t *) label->gpt->header,
	    uint32_t_le2host(label->gpt->header->header_size)));

	/* Write to main GPT header location */
	rc = block_write_direct(dev_handle, GPT_HDR_BA, GPT_HDR_BS, label->gpt->header);
	if (rc != EOK) {
		block_fini(dev_handle);
		return rc;
	}

	/* Write Protective MBR */
	br_block_t mbr;
	memset(&mbr, 0, 512);
	memset(mbr.pte[0].first_chs, 1, 3);
	mbr.pte[0].ptype = 0xEE;
	memset(mbr.pte[0].last_chs, 0xFF, 3);
	mbr.pte[0].first_lba = host2uint32_t_le(1);
	mbr.pte[0].length = 0xFFFFFFFF;
	mbr.signature = host2uint16_t_le(BR_SIGNATURE);

	rc = block_write_direct(dev_handle, 0, 1, &mbr);
	block_fini(dev_handle);
	if (rc != EOK)
		return rc;

	return EOK;
}

/** Alloc partition array */
gpt_partitions_t * gpt_alloc_partitions(void)
{
	return alloc_part_array(GPT_MIN_PART_NUM);
}

/** Parse partitions from GPT
 * @param label GPT label to be parsed
 *
 * @return EOK on success, error code otherwise
 */
int gpt_read_partitions(gpt_label_t *label)
{
	int rc;
	unsigned int i;
	uint32_t fillries = uint32_t_le2host(label->gpt->header->fillries);
	uint32_t ent_size = uint32_t_le2host(label->gpt->header->entry_size);
	uint64_t ent_lba = uint64_t_le2host(label->gpt->header->entry_lba);

	if (label->parts == NULL) {
		label->parts = alloc_part_array(fillries);
		if (label->parts == NULL) {
			return ENOMEM;
		}
	}

	/* comm_size is ignored */
	rc = block_init(EXCHANGE_SERIALIZE, label->device, sizeof(gpt_entry_t));
	if (rc != EOK)
		goto fail;

	size_t block_size;
	rc = block_get_bsize(label->device, &block_size);
	if (rc != EOK)
		goto fini_fail;

	aoff64_t pos = ent_lba * block_size;

	/*
	 * Read just sizeof(gpt_entry_t) bytes for each entry from the device.
	 * Hopefully this does not bypass the cache (libblock.c does not say),
	 * and it also allows a variable partition entry size (we always read
	 * just sizeof(gpt_entry_t) bytes, which hopefully keeps backward
	 * compatibility).
	 */
	for (i = 0; i < fillries; ++i) {
		/* FIXME: this does bypass the cache... */
		rc = block_read_bytes_direct(label->device, pos, sizeof(gpt_entry_t), label->parts->part_array + i);
		/*
		 * FIXME: but seqread() is just too complex...
		 * rc = block_seqread(gpt->device, &bufpos, &buflen, &pos, res->part_array[i], sizeof(gpt_entry_t));
		 */
		pos += ent_size;

		if (rc != EOK)
			goto fini_fail;
	}

	uint32_t crc = compute_crc32((uint8_t *) label->parts->part_array,
	    fillries * ent_size);

	if (uint32_t_le2host(label->gpt->header->pe_array_crc32) != crc) {
		rc = EBADCHECKSUM;
		goto fini_fail;
	}

	block_fini(label->device);
	return EOK;

fini_fail:
	block_fini(label->device);

fail:
	gpt_free_partitions(label->parts);
	label->parts = NULL;
	return rc;
}
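
/*
 * Illustrative sketch: reading and walking the partition array after a
 * successful gpt_read_header(). Assumes gpt_part_foreach() and the accessors
 * below behave as declared in libgpt.h; the printf() formatting is only an
 * example.
 *
 *	int rc = gpt_read_partitions(label);
 *	if (rc != EOK)
 *		return rc;
 *
 *	gpt_part_foreach (label, p) {
 *		if (gpt_get_part_type(p) == GPT_PTE_UNUSED)
 *			continue;
 *		printf("%" PRIu64 " .. %" PRIu64 "\n",
 *		    gpt_get_start_lba(p), gpt_get_end_lba(p));
 *	}
 */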

/** Write GPT and partitions to device
 * Note: also writes the header.
 * @param label label to write
 * @param dev_handle device to write the data to
 *
 * @return EOK on success, error code otherwise
 */
int gpt_write_partitions(gpt_label_t *label, service_id_t dev_handle)
{
	int rc;
	size_t b_size;

	/* comm_size of 4096 is ignored */
	rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
	if (rc != EOK && rc != EEXIST)
		return rc;

	rc = block_get_bsize(dev_handle, &b_size);
	if (rc != EOK)
		goto fail;

	aoff64_t n_blocks;
	rc = block_get_nblocks(dev_handle, &n_blocks);
	if (rc != EOK)
		goto fail;

	/*
	 * When we are creating a new label from scratch, we need to fill
	 * the header with sensible defaults.
	 */
	if (label->gpt == NULL) {
		label->gpt = gpt_alloc_header(b_size);
		if (label->gpt == NULL) {
			rc = ENOMEM;
			goto fail;
		}
	}

	uint32_t e_size = uint32_t_le2host(label->gpt->header->entry_size);
	size_t fillries = label->parts->fill > GPT_MIN_PART_NUM ? label->parts->fill : GPT_MIN_PART_NUM;

	if (e_size != sizeof(gpt_entry_t)) {
		rc = ENOTSUP;
		goto fail;
	}

	label->gpt->header->fillries = host2uint32_t_le(fillries);
	uint64_t arr_blocks = (fillries * sizeof(gpt_entry_t)) / b_size;
	uint64_t gpt_space = arr_blocks + GPT_HDR_BS + 1; /* +1 for Protective MBR */
	label->gpt->header->first_usable_lba = host2uint64_t_le(gpt_space);
	label->gpt->header->last_usable_lba = host2uint64_t_le(n_blocks - gpt_space - 1);

	/* Perform checks */
	gpt_part_foreach (label, p) {
		if (gpt_get_part_type(p) == GPT_PTE_UNUSED)
			continue;

		if (!check_encaps(p, n_blocks, gpt_space)) {
			rc = ERANGE;
			goto fail;
		}

		gpt_part_foreach (label, q) {
			if (p == q)
				continue;

			if (gpt_get_part_type(q) != GPT_PTE_UNUSED) {
				if (check_overlap(p, q)) {
					rc = ERANGE;
					goto fail;
				}
			}
		}
	}

	label->gpt->header->pe_array_crc32 = host2uint32_t_le(compute_crc32(
	    (uint8_t *) label->parts->part_array,
	    fillries * e_size));

	/* Write to backup GPT partition array location */
	rc = block_write_direct(dev_handle, n_blocks - arr_blocks - 1,
	    arr_blocks, label->parts->part_array);
	if (rc != EOK)
		goto fail;

	/* Write to main GPT partition array location */
	rc = block_write_direct(dev_handle, uint64_t_le2host(label->gpt->header->entry_lba),
	    arr_blocks, label->parts->part_array);
	if (rc != EOK)
		goto fail;

	return gpt_write_header(label, dev_handle);

fail:
	block_fini(dev_handle);
	return rc;
}
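
/*
 * Illustrative sketch: writing a modified label back to the device. Per the
 * code above, gpt_write_partitions() recomputes the usable-LBA range and the
 * CRCs and then calls gpt_write_header() itself, so a single call suffices.
 * Using label->device assumes the label was previously read from that device.
 *
 *	int rc = gpt_write_partitions(label, label->device);
 *	if (rc != EOK)
 *		return rc;
 */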

/** Alloc new partition
 *
 * @return pointer to the new partition or NULL
 *
 * Note: use either gpt_alloc_partition() or gpt_get_partition().
 * This returns a zero-filled memory block; gpt_add_partition() must be
 * called to insert it into a partition array, and gpt_free_partition()
 * must be called on it afterwards.
 */
gpt_part_t * gpt_alloc_partition(void)
{
	gpt_part_t *p = malloc(sizeof(gpt_part_t));
	if (p == NULL)
		return NULL;

	memset(p, 0, sizeof(gpt_part_t));

	return p;
}
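
/*
 * Illustrative sketch of the gpt_alloc_partition() / gpt_add_partition()
 * pattern described above. The type index and LBA range are arbitrary
 * placeholders; label is assumed to come from gpt_alloc_label().
 *
 *	gpt_part_t *part = gpt_alloc_partition();
 *	if (part == NULL)
 *		return ENOMEM;
 *
 *	gpt_set_part_type(part, 0);
 *	gpt_set_start_lba(part, 2048);
 *	gpt_set_end_lba(part, 4095);
 *	gpt_set_part_name(part, "example", 7);
 *
 *	int rc = gpt_add_partition(label, part);
 *	gpt_free_partition(part);
 */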

/** Alloc new partition already inside the label
 *
 * @param label label to carry the new partition
 *
 * @return pointer to the new partition or NULL on ENOMEM
 *
 * Note: use either gpt_alloc_partition() or gpt_get_partition().
 * This one returns a pointer to the first empty structure already
 * inside the array, so do not call gpt_add_partition() afterwards.
 * This is the one you will usually want.
 */
gpt_part_t * gpt_get_partition(gpt_label_t *label)
{
	gpt_part_t *p;

	/* Find the first empty entry */
	do {
		if (label->parts->fill == label->parts->arr_size) {
			if (extend_part_array(label->parts) == -1)
				return NULL;
		}

		p = label->parts->part_array + label->parts->fill++;

	} while (gpt_get_part_type(p) != GPT_PTE_UNUSED);

	return p;
}
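
/*
 * Illustrative sketch: the in-place alternative to the pair above.
 * gpt_get_partition() hands out the first unused entry directly from the
 * label, so no gpt_add_partition() call follows; values are placeholders.
 *
 *	gpt_part_t *p = gpt_get_partition(label);
 *	if (p == NULL)
 *		return ENOMEM;
 *
 *	gpt_set_part_type(p, 0);
 *	gpt_set_start_lba(p, 2048);
 *	gpt_set_end_lba(p, 4095);
 */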

/** Get partition already inside the label
 *
 * @param label label carrying the partition
 * @param idx index of the partition
 *
 * @return pointer to the partition
 *         or NULL when out of range
 *
 * Note: For new partitions use either gpt_alloc_partition() or
 * gpt_get_partition() unless you want a partition at a specific place.
 * This returns a pointer to a structure already inside the array,
 * so do not call gpt_add_partition() afterwards.
 * This function is handy when you want to change an already existing
 * partition or simply write somewhere in the middle. It works only
 * for indexes smaller than either 128 or the actual number of filled
 * entries.
 */
gpt_part_t * gpt_get_partition_at(gpt_label_t *label, size_t idx)
{
	if (idx >= GPT_MIN_PART_NUM && idx >= label->parts->fill)
		return NULL;

	return label->parts->part_array + idx;
}

/** Copy partition into the partition array
 *
 * @param label target label
 * @param partition source partition to copy
 *
 * @return EOK on success, ENOMEM when the array cannot be extended
 *
 * Note: for use with gpt_alloc_partition() only. You will get
 * duplicates with gpt_get_partition().
 * Note: does not call gpt_free_partition()!
 */
int gpt_add_partition(gpt_label_t *label, gpt_part_t *partition)
{
	gpt_part_t *p;

	/* Find the first empty entry */
	do {
		if (label->parts->fill == label->parts->arr_size) {
			if (extend_part_array(label->parts) == -1)
				return ENOMEM;
		}

		p = label->parts->part_array + label->parts->fill++;

	} while (gpt_get_part_type(p) != GPT_PTE_UNUSED);

	memcpy(p, partition, sizeof(gpt_entry_t));

	return EOK;
}

/** Remove partition from the array
 * @param label label to remove from
 * @param idx index of the partition to remove
 *
 * @return EOK on success, ENOMEM on array reduction failure
 *
 * Note: even if it fails, the partition is still removed; only
 * the reduction of the array failed.
 */
int gpt_remove_partition(gpt_label_t *label, size_t idx)
{
	if (idx >= label->parts->arr_size)
		return EINVAL;

	/*
	 * FIXME!
	 * If we allow blank spots, we break the array. If we have more than
	 * 128 partitions in the array and then remove something from
	 * the first 128 partitions, we would forget to write the last one.
	 */
	memset(label->parts->part_array + idx, 0, sizeof(gpt_entry_t));

	if (label->parts->fill > idx)
		label->parts->fill = idx;

	/*
	 * FIXME! HOPEFULLY FIXED.
	 * We cannot reduce the array so simply. We may have some partitions
	 * there since we allow blank spots.
	 */
	gpt_part_t *p;

	if (label->parts->fill > GPT_MIN_PART_NUM &&
	    label->parts->fill < (label->parts->arr_size / 2) - GPT_IGNORE_FILL_NUM) {
		for (p = gpt_get_partition_at(label, label->parts->arr_size / 2);
		    p < label->parts->part_array + label->parts->arr_size; ++p) {
			if (gpt_get_part_type(p) != GPT_PTE_UNUSED)
				return EOK;
		}

		if (reduce_part_array(label->parts) == ENOMEM)
			return ENOMEM;
	}

	return EOK;
}

/** Free partition list
 *
 * @param parts partition list to be freed
 */
void gpt_free_partitions(gpt_partitions_t * parts)
{
	free(parts->part_array);
	free(parts);
}

/** Get partition type by linear search
 * (hopefully this doesn't get slow)
 */
size_t gpt_get_part_type(gpt_part_t * p)
{
	size_t i;

	for (i = 0; gpt_ptypes[i].guid != NULL; i++) {
		if (p->part_type[3] == get_byte(gpt_ptypes[i].guid + 0) &&
		    p->part_type[2] == get_byte(gpt_ptypes[i].guid + 2) &&
		    p->part_type[1] == get_byte(gpt_ptypes[i].guid + 4) &&
		    p->part_type[0] == get_byte(gpt_ptypes[i].guid + 6) &&

		    p->part_type[5] == get_byte(gpt_ptypes[i].guid + 8) &&
		    p->part_type[4] == get_byte(gpt_ptypes[i].guid + 10) &&

		    p->part_type[7] == get_byte(gpt_ptypes[i].guid + 12) &&
		    p->part_type[6] == get_byte(gpt_ptypes[i].guid + 14) &&

		    p->part_type[8] == get_byte(gpt_ptypes[i].guid + 16) &&
		    p->part_type[9] == get_byte(gpt_ptypes[i].guid + 18) &&
		    p->part_type[10] == get_byte(gpt_ptypes[i].guid + 20) &&
		    p->part_type[11] == get_byte(gpt_ptypes[i].guid + 22) &&
		    p->part_type[12] == get_byte(gpt_ptypes[i].guid + 24) &&
		    p->part_type[13] == get_byte(gpt_ptypes[i].guid + 26) &&
		    p->part_type[14] == get_byte(gpt_ptypes[i].guid + 28) &&
		    p->part_type[15] == get_byte(gpt_ptypes[i].guid + 30))
			break;
	}

	return i;
}

/** Set partition type
 * @param p partition to be set
 * @param type partition type to set
 *             - see our fine selection at gpt_ptypes to choose from
 */
void gpt_set_part_type(gpt_part_t * p, size_t type)
{
	/* Beware: first 3 blocks are byteswapped! */
	p->part_type[3] = get_byte(gpt_ptypes[type].guid + 0);
	p->part_type[2] = get_byte(gpt_ptypes[type].guid + 2);
	p->part_type[1] = get_byte(gpt_ptypes[type].guid + 4);
	p->part_type[0] = get_byte(gpt_ptypes[type].guid + 6);

	p->part_type[5] = get_byte(gpt_ptypes[type].guid + 8);
	p->part_type[4] = get_byte(gpt_ptypes[type].guid + 10);

	p->part_type[7] = get_byte(gpt_ptypes[type].guid + 12);
	p->part_type[6] = get_byte(gpt_ptypes[type].guid + 14);

	p->part_type[8] = get_byte(gpt_ptypes[type].guid + 16);
	p->part_type[9] = get_byte(gpt_ptypes[type].guid + 18);
	p->part_type[10] = get_byte(gpt_ptypes[type].guid + 20);
	p->part_type[11] = get_byte(gpt_ptypes[type].guid + 22);
	p->part_type[12] = get_byte(gpt_ptypes[type].guid + 24);
	p->part_type[13] = get_byte(gpt_ptypes[type].guid + 26);
	p->part_type[14] = get_byte(gpt_ptypes[type].guid + 28);
	p->part_type[15] = get_byte(gpt_ptypes[type].guid + 30);
}
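
/*
 * Worked example (illustrative), assuming gpt_ptypes[].guid stores the GUID
 * as 32 hex digits without dashes: for a type GUID written textually as
 * 0FC63DAF-8483-4772-8E79-3D69D8477DE4, the first three groups are stored
 * little-endian on disk and the last two big-endian, so the mapping above
 * yields part_type[0..15] =
 * AF 3D C6 0F 83 84 72 47 8E 79 3D 69 D8 47 7D E4.
 */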

/** Get partition starting LBA */
uint64_t gpt_get_start_lba(gpt_part_t * p)
{
	return uint64_t_le2host(p->start_lba);
}

/** Set partition starting LBA */
void gpt_set_start_lba(gpt_part_t * p, uint64_t start)
{
	p->start_lba = host2uint64_t_le(start);
}

/** Get partition ending LBA */
uint64_t gpt_get_end_lba(gpt_part_t * p)
{
	return uint64_t_le2host(p->end_lba);
}

/** Set partition ending LBA */
void gpt_set_end_lba(gpt_part_t * p, uint64_t end)
{
	p->end_lba = host2uint64_t_le(end);
}

/** Get partition name */
unsigned char * gpt_get_part_name(gpt_part_t * p)
{
	return p->part_name;
}

/** Copy partition name */
void gpt_set_part_name(gpt_part_t *p, char *name, size_t length)
{
	if (length >= 72)
		length = 71;

	memcpy(p->part_name, name, length);
	p->part_name[length] = '\0';
}

/** Get partition attribute */
bool gpt_get_flag(gpt_part_t * p, GPT_ATTR flag)
{
	return (p->attributes & (((uint64_t) 1) << flag)) ? 1 : 0;
}

/** Set partition attribute */
void gpt_set_flag(gpt_part_t * p, GPT_ATTR flag, bool value)
{
	uint64_t attr = p->attributes;

	if (value)
		attr = attr | (((uint64_t) 1) << flag);
	else
		attr = attr ^ (attr & (((uint64_t) 1) << flag));

	p->attributes = attr;
}

/** Generate a new pseudo-random UUID
 * @param uuid Pointer to the UUID to overwrite.
 */
void gpt_set_random_uuid(uint8_t * uuid)
{
	srandom((unsigned int) (size_t) uuid);

	unsigned int i;
	for (i = 0; i < 16 / sizeof(long int); ++i)
		((long int *) uuid)[i] = random();
}

/** Get next aligned address */
uint64_t gpt_get_next_aligned(uint64_t addr, unsigned int alignment)
{
	uint64_t div = addr / alignment;
	return (div + 1) * alignment;
}
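
/*
 * Example (illustrative): gpt_get_next_aligned(2048, 2048) returns 4096, and
 * gpt_get_next_aligned(2049, 2048) returns 4096 as well; an already aligned
 * address is bumped to the next boundary rather than kept.
 */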

/* Internal functions follow */

static int load_and_check_header(service_id_t dev_handle, aoff64_t addr, size_t b_size, gpt_header_t * header)
{
	int rc;

	rc = block_read_direct(dev_handle, addr, GPT_HDR_BS, header);
	if (rc != EOK)
		return rc;

	unsigned int i;
	/* Check the EFI signature */
	for (i = 0; i < 8; ++i) {
		if (header->efi_signature[i] != efi_signature[i])
			return EINVAL;
	}

	/* Check the CRC32 of the header (on-disk fields are little-endian) */
	uint32_t crc = header->header_crc32;
	header->header_crc32 = 0;
	if (uint32_t_le2host(crc) != compute_crc32((uint8_t *) header,
	    uint32_t_le2host(header->header_size)))
		return EBADCHECKSUM;
	else
		header->header_crc32 = crc;

	/* Check for zeroes in the rest of the block */
	for (i = sizeof(gpt_header_t); i < b_size; ++i) {
		if (((uint8_t *) header)[i] != 0)
			return EINVAL;
	}

	return EOK;
}

static gpt_partitions_t * alloc_part_array(uint32_t num)
{
	gpt_partitions_t *res = malloc(sizeof(gpt_partitions_t));
	if (res == NULL) {
		errno = ENOMEM;
		return NULL;
	}

	uint32_t size = num > GPT_BASE_PART_NUM ? num : GPT_BASE_PART_NUM;
	res->part_array = malloc(size * sizeof(gpt_entry_t));
	if (res->part_array == NULL) {
		free(res);
		errno = ENOMEM;
		return NULL;
	}

	memset(res->part_array, 0, size * sizeof(gpt_entry_t));

	res->fill = 0;
	res->arr_size = num;

	return res;
}

static int extend_part_array(gpt_partitions_t * p)
{
	size_t nsize = p->arr_size * 2;
	gpt_entry_t *tmp = malloc(nsize * sizeof(gpt_entry_t));
	if (tmp == NULL) {
		errno = ENOMEM;
		return -1;
	}

	memcpy(tmp, p->part_array, p->fill * sizeof(gpt_entry_t));
	free(p->part_array);
	p->part_array = tmp;
	p->arr_size = nsize;

	return 0;
}

static int reduce_part_array(gpt_partitions_t * p)
{
	if (p->arr_size > GPT_MIN_PART_NUM) {
		unsigned int nsize = p->arr_size / 2;
		nsize = nsize > GPT_MIN_PART_NUM ? nsize : GPT_MIN_PART_NUM;
		gpt_entry_t *tmp = malloc(nsize * sizeof(gpt_entry_t));
		if (tmp == NULL)
			return ENOMEM;

		/* Copy whole entries, not just that many bytes */
		memcpy(tmp, p->part_array,
		    (p->fill < nsize ? p->fill : nsize) * sizeof(gpt_entry_t));
		free(p->part_array);
		p->part_array = tmp;
		p->arr_size = nsize;
	}

	return 0;
}

/* Parse a byte from a string in hexadecimal,
 * e.g. "FF" => 255
 */
static uint8_t get_byte(const char * c)
{
	uint8_t val = 0;
	char hex[3] = { *c, *(c + 1), 0 };

	errno = str_uint8_t(hex, NULL, 16, false, &val);
	return val;
}

static bool check_overlap(gpt_part_t * p1, gpt_part_t * p2)
{
	if (gpt_get_start_lba(p1) < gpt_get_start_lba(p2) &&
	    gpt_get_end_lba(p1) < gpt_get_start_lba(p2)) {
		return false;
	} else if (gpt_get_start_lba(p1) > gpt_get_start_lba(p2) &&
	    gpt_get_end_lba(p2) < gpt_get_start_lba(p1)) {
		return false;
	}

	return true;
}

static bool check_encaps(gpt_part_t *p, uint64_t n_blocks, uint64_t first_lba)
{
	/*
	 * We allow "<=" in the second expression because the backup area at
	 * the end of the disk has no protective MBR, so it is one block
	 * smaller than the area at the beginning.
	 */
	if (gpt_get_start_lba(p) >= first_lba &&
	    gpt_get_end_lba(p) <= n_blocks - first_lba)
		return true;

	return false;
}
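
/*
 * Worked example (illustrative): with n_blocks = 10000 and first_lba = 34,
 * check_encaps() accepts partitions whose LBAs lie within [34, 9966];
 * the upper bound uses "<=" for the reason given in the comment above.
 */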