source: mainline/uspace/lib/gpt/libgpt.c@ 61ab4a9

Last change on this file since 61ab4a9 was 0435fe41, checked in by Dominik Taborsky (AT DOT) <brembyseznamcz>, 12 years ago

polishing libmbr, libgpt, hdisk

/*
 * Copyright (c) 2011, 2012, 2013 Dominik Taborsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libgpt
 * @{
 */
/** @file
 */

/* TODO:
 * This implementation only supports fixed-size partition entries, although
 * the specification requires support for variable entry sizes. Use a void *
 * array and casting to achieve that.
 */

#include <ipc/bd.h>
#include <async.h>
#include <stdio.h>
#include <block.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <byteorder.h>
#include <checksum.h>
#include <mem.h>
#include <sys/typefmt.h>

#include "libgpt.h"

static int load_and_check_header(service_id_t, aoff64_t, size_t, gpt_header_t *);
static gpt_partitions_t *alloc_part_array(uint32_t);
static int extend_part_array(gpt_partitions_t *);
static int reduce_part_array(gpt_partitions_t *);
static uint8_t get_byte(const char *);
static bool check_overlap(gpt_part_t *, gpt_part_t *);
static bool check_encaps(gpt_part_t *, uint64_t, uint64_t);

/** Allocate memory for gpt label */
gpt_label_t *gpt_alloc_label(void)
{
    gpt_label_t *label = malloc(sizeof(gpt_label_t));
    if (label == NULL)
        return NULL;

    /* This is necessary so that gpt_part_foreach does not segfault */
    label->parts = gpt_alloc_partitions();
    if (label->parts == NULL) {
        free(label);
        return NULL;
    }

    label->gpt = NULL;

    label->device = 0;

    return label;
}

/** Free gpt_label_t structure */
void gpt_free_label(gpt_label_t *label)
{
    if (label->gpt != NULL)
        gpt_free_gpt(label->gpt);

    if (label->parts != NULL)
        gpt_free_partitions(label->parts);

    free(label);
}

/** Allocate memory for gpt header */
gpt_t *gpt_alloc_header(size_t size)
{
    gpt_t *gpt = malloc(sizeof(gpt_t));
    if (gpt == NULL)
        return NULL;

    /*
     * We might need only sizeof(gpt_header_t), but we should follow
     * specs and have zeroes through all the rest of the block
     */
    size_t final_size = size > sizeof(gpt_header_t) ? size : sizeof(gpt_header_t);
    gpt->header = malloc(final_size);
    if (gpt->header == NULL) {
        free(gpt);
        return NULL;
    }

    memset(gpt->header, 0, final_size);

    return gpt;
}

/** Free the gpt_t structure, including the gpt->header block */
void gpt_free_gpt(gpt_t *gpt)
{
    free(gpt->header);
    free(gpt);
}

/** Read GPT from specific device
 * @param label      label structure to fill
 * @param dev_handle device to read GPT from
 *
 * @return EOK on success, error code on error
 */
int gpt_read_header(gpt_label_t *label, service_id_t dev_handle)
{
    int rc;
    size_t b_size;

    rc = block_init(EXCHANGE_ATOMIC, dev_handle, 512);
    if (rc != EOK)
        goto fail;

    rc = block_get_bsize(dev_handle, &b_size);
    if (rc != EOK)
        goto fini_fail;

    if (label->gpt == NULL) {
        label->gpt = gpt_alloc_header(b_size);
        if (label->gpt == NULL) {
            rc = ENOMEM;
            goto fini_fail;
        }
    }

    rc = load_and_check_header(dev_handle, GPT_HDR_BA, b_size, label->gpt->header);
    if (rc == EBADCHECKSUM || rc == EINVAL) {
        aoff64_t n_blocks;
        rc = block_get_nblocks(dev_handle, &n_blocks);
        if (rc != EOK)
            goto free_fail;

        rc = load_and_check_header(dev_handle, n_blocks - 1, b_size, label->gpt->header);
        if (rc == EBADCHECKSUM || rc == EINVAL)
            goto free_fail;
    }

    label->device = dev_handle;
    block_fini(dev_handle);
    return EOK;

free_fail:
    gpt_free_gpt(label->gpt);
    label->gpt = NULL;
fini_fail:
    block_fini(dev_handle);
fail:
    return rc;
}

/** Write GPT header to device
 * @param label      GPT label with the header to be written
 * @param dev_handle device handle to write the data to
 *
 * @return EOK on success, libblock error code otherwise
 *
 * Note: write the partitions first (if they were modified), then the GPT header.
 */
int gpt_write_header(gpt_label_t *label, service_id_t dev_handle)
{
    int rc;
    size_t b_size;

    /* The comm_size argument (the last one) is ignored */
    rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
    if (rc != EOK && rc != EEXIST)
        return rc;

    rc = block_get_bsize(dev_handle, &b_size);
    if (rc != EOK)
        return rc;

    aoff64_t n_blocks;
    rc = block_get_nblocks(dev_handle, &n_blocks);
    if (rc != EOK) {
        block_fini(dev_handle);
        return rc;
    }

    uint64_t tmp;

    /* Prepare the backup header */
    label->gpt->header->alternate_lba = label->gpt->header->my_lba;
    label->gpt->header->my_lba = host2uint64_t_le(n_blocks - 1);

    tmp = label->gpt->header->entry_lba;
    label->gpt->header->entry_lba = host2uint64_t_le(n_blocks -
        (uint32_t_le2host(label->gpt->header->fillries) * sizeof(gpt_entry_t))
        / b_size - 1);

    label->gpt->header->header_crc32 = 0;
    label->gpt->header->header_crc32 = host2uint32_t_le(
        compute_crc32((uint8_t *) label->gpt->header,
        uint32_t_le2host(label->gpt->header->header_size)));

    /* Write to backup GPT header location */
    rc = block_write_direct(dev_handle, n_blocks - 1, GPT_HDR_BS, label->gpt->header);
    if (rc != EOK) {
        block_fini(dev_handle);
        return rc;
    }

    /* Prepare the main header */
    label->gpt->header->entry_lba = tmp;

    tmp = label->gpt->header->alternate_lba;
    label->gpt->header->alternate_lba = label->gpt->header->my_lba;
    label->gpt->header->my_lba = tmp;

    label->gpt->header->header_crc32 = 0;
    label->gpt->header->header_crc32 = host2uint32_t_le(
        compute_crc32((uint8_t *) label->gpt->header,
        uint32_t_le2host(label->gpt->header->header_size)));

    /* Write to main GPT header location */
    rc = block_write_direct(dev_handle, GPT_HDR_BA, GPT_HDR_BS, label->gpt->header);
    block_fini(dev_handle);
    if (rc != EOK)
        return rc;

    return EOK;
}

/** Alloc partition array */
gpt_partitions_t *gpt_alloc_partitions(void)
{
    return alloc_part_array(GPT_MIN_PART_NUM);
}

/** Parse partitions from GPT
 * @param label GPT label to be parsed
 *
 * @return EOK on success, error code otherwise
 */
int gpt_read_partitions(gpt_label_t *label)
{
    int rc;
    unsigned int i;
    uint32_t fillries = uint32_t_le2host(label->gpt->header->fillries);
    uint32_t ent_size = uint32_t_le2host(label->gpt->header->entry_size);
    uint64_t ent_lba = uint64_t_le2host(label->gpt->header->entry_lba);

    if (label->parts == NULL) {
        label->parts = alloc_part_array(fillries);
        if (label->parts == NULL)
            return ENOMEM;
    }

    /* comm_size is ignored */
    rc = block_init(EXCHANGE_SERIALIZE, label->device, sizeof(gpt_entry_t));
    if (rc != EOK)
        goto fail;

    size_t block_size;
    rc = block_get_bsize(label->device, &block_size);
    if (rc != EOK)
        goto fini_fail;

    //size_t bufpos = 0;
    //size_t buflen = 0;
    aoff64_t pos = ent_lba * block_size;

    /*
     * Now we read just sizeof(gpt_entry_t) bytes for each entry from the device.
     * Hopefully, this does not bypass cache (no mention in libblock.c),
     * and also allows us to have variable partition entry size (but we
     * will always read just sizeof(gpt_entry_t) bytes - hopefully they
     * don't break backward compatibility)
     */
    for (i = 0; i < fillries; ++i) {
        /* FIXME: this does bypass cache... */
        rc = block_read_bytes_direct(label->device, pos, sizeof(gpt_entry_t), label->parts->part_array + i);
        /*
         * FIXME: but seqread() is just too complex...
         * rc = block_seqread(gpt->device, &bufpos, &buflen, &pos, res->part_array[i], sizeof(gpt_entry_t));
         */
        pos += ent_size;

        if (rc != EOK)
            goto fini_fail;
    }

    uint32_t crc = compute_crc32((uint8_t *) label->parts->part_array,
        fillries * ent_size);

    if (uint32_t_le2host(label->gpt->header->pe_array_crc32) != crc) {
        rc = EBADCHECKSUM;
        goto fini_fail;
    }

    block_fini(label->device);
    return EOK;

fini_fail:
    block_fini(label->device);

fail:
    gpt_free_partitions(label->parts);
    label->parts = NULL;
    return rc;
}
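
/*
 * Illustrative read sequence (a sketch only, not part of the library API;
 * `dev` stands for a service_id_t obtained by the caller):
 *
 *     gpt_label_t *label = gpt_alloc_label();
 *     if (label == NULL)
 *         return ENOMEM;
 *     int rc = gpt_read_header(label, dev);
 *     if (rc == EOK)
 *         rc = gpt_read_partitions(label);
 *     if (rc == EOK) {
 *         gpt_part_foreach (label, p) {
 *             if (gpt_get_part_type(p) == GPT_PTE_UNUSED)
 *                 continue;
 *             // inspect gpt_get_start_lba(p), gpt_get_end_lba(p), ...
 *         }
 *     }
 *     gpt_free_label(label);
 */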

/** Write GPT and partitions to device
 * Note: this also writes the header.
 * @param label      label to write
 * @param dev_handle device to write the data to
 *
 * @return EOK on success, error code otherwise
 */
int gpt_write_partitions(gpt_label_t *label, service_id_t dev_handle)
{
    int rc;
    size_t b_size;
    uint32_t e_size = uint32_t_le2host(label->gpt->header->entry_size);
    size_t fillries = label->parts->fill > GPT_MIN_PART_NUM ? label->parts->fill : GPT_MIN_PART_NUM;

    if (e_size != sizeof(gpt_entry_t))
        return ENOTSUP;

    /* comm_size of 4096 is ignored */
    rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
    if (rc != EOK && rc != EEXIST)
        return rc;

    rc = block_get_bsize(dev_handle, &b_size);
    if (rc != EOK)
        goto fail;

    aoff64_t n_blocks;
    rc = block_get_nblocks(dev_handle, &n_blocks);
    if (rc != EOK)
        goto fail;

    label->gpt->header->fillries = host2uint32_t_le(fillries);
    uint64_t arr_blocks = (fillries * sizeof(gpt_entry_t)) / b_size;
    uint64_t gpt_space = arr_blocks + GPT_HDR_BS + 1; /* +1 for Protective MBR */
    label->gpt->header->first_usable_lba = host2uint64_t_le(gpt_space);
    label->gpt->header->last_usable_lba = host2uint64_t_le(n_blocks - gpt_space - 1);

    /* Perform checks */
    gpt_part_foreach (label, p) {
        if (gpt_get_part_type(p) == GPT_PTE_UNUSED)
            continue;

        if (!check_encaps(p, n_blocks, gpt_space)) {
            rc = ERANGE;
            printf("encaps with: %" PRIuOFF64 ", %" PRIu64 ", %" PRIu64 "\n",
                n_blocks, gpt_space, gpt_get_end_lba(p));
            goto fail;
        }

        gpt_part_foreach (label, q) {
            if (p == q)
                continue;

            if (gpt_get_part_type(p) != GPT_PTE_UNUSED) {
                if (check_overlap(p, q)) {
                    printf("overlap with: %" PRIu64 ", %" PRIu64 "\n",
                        gpt_get_start_lba(p), gpt_get_start_lba(q));
                    rc = ERANGE;
                    goto fail;
                }
            }
        }
    }

    label->gpt->header->pe_array_crc32 = host2uint32_t_le(compute_crc32(
        (uint8_t *) label->parts->part_array,
        fillries * e_size));

    /* Write to backup GPT partition array location */
    rc = block_write_direct(dev_handle, n_blocks - arr_blocks - 1,
        arr_blocks, label->parts->part_array);
    if (rc != EOK)
        goto fail;

    /* Write to main GPT partition array location */
    rc = block_write_direct(dev_handle, uint64_t_le2host(label->gpt->header->entry_lba),
        arr_blocks, label->parts->part_array);
    if (rc != EOK)
        goto fail;

    return gpt_write_header(label, dev_handle);

fail:
    block_fini(dev_handle);
    return rc;
}
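
/*
 * Illustrative write sequence (a sketch only; `dev` is assumed to be the
 * device the label belongs to). Since gpt_write_partitions() ends by
 * calling gpt_write_header(), no separate header write is needed:
 *
 *     // ... fill or modify label->parts via gpt_get_partition() et al. ...
 *     int rc = gpt_write_partitions(label, dev);
 *     if (rc != EOK)
 *         return rc;
 */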

/** Alloc new partition
 *
 * @return pointer to the new partition or NULL on failure
 *
 * Note: use either gpt_alloc_partition or gpt_get_partition.
 * This returns a zero-filled memory block; gpt_add_partition() needs to be
 * called to insert it into a partition array.
 * Requires you to call gpt_free_partition afterwards.
 */
gpt_part_t *gpt_alloc_partition(void)
{
    gpt_part_t *p = malloc(sizeof(gpt_part_t));
    if (p == NULL)
        return NULL;

    memset(p, 0, sizeof(gpt_part_t));

    return p;
}

/** Alloc new partition already inside the label
 *
 * @param label label to carry the new partition
 *
 * @return pointer to the new partition or NULL on ENOMEM
 *
 * Note: use either gpt_alloc_partition or gpt_get_partition.
 * This one returns a pointer to the first empty structure already
 * inside the array, so don't call gpt_add_partition() afterwards.
 * This is the one you will usually want.
 */
gpt_part_t *gpt_get_partition(gpt_label_t *label)
{
    gpt_part_t *p;

    /* Find the first empty entry */
    do {
        if (label->parts->fill == label->parts->arr_size) {
            if (extend_part_array(label->parts) == -1)
                return NULL;
        }

        p = label->parts->part_array + label->parts->fill++;

    } while (gpt_get_part_type(p) != GPT_PTE_UNUSED);

    return p;
}
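
/*
 * Typical use of gpt_get_partition() (a sketch only; the type index, LBA
 * range and name below are placeholders chosen by the caller):
 *
 *     gpt_part_t *p = gpt_get_partition(label);
 *     if (p == NULL)
 *         return ENOMEM;
 *     gpt_set_part_type(p, type_index);   // index into gpt_ptypes[]
 *     gpt_set_start_lba(p, start);
 *     gpt_set_end_lba(p, end);
 *     gpt_set_part_name(p, "example", 7);
 */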

/** Get partition already inside the label
 *
 * @param label label carrying the partition
 * @param idx   index of the partition
 *
 * @return pointer to the partition or NULL when out of range
 *
 * Note: For new partitions use either gpt_alloc_partition or
 * gpt_get_partition unless you want a partition at a specific place.
 * This returns a pointer to a structure already inside the array,
 * so don't call gpt_add_partition() afterwards.
 * This function is handy when you want to change an already existing
 * partition or simply to write somewhere in the middle. This works only
 * for indexes smaller than either 128 or the actual number of filled
 * entries.
 */
gpt_part_t *gpt_get_partition_at(gpt_label_t *label, size_t idx)
{
    if (idx >= GPT_MIN_PART_NUM && idx >= label->parts->fill)
        return NULL;

    return label->parts->part_array + idx;
}

/** Copy partition into partition array
 *
 * @param label     target label
 * @param partition source partition to copy
 *
 * @return EOK on success, ENOMEM when the array cannot be extended
 *
 * Note: for use with gpt_alloc_partition() only. You will get
 * duplicates with gpt_get_partition().
 * Note: does not call gpt_free_partition()!
 */
int gpt_add_partition(gpt_label_t *label, gpt_part_t *partition)
{
    gpt_part_t *p;

    /* Find the first empty entry */
    do {
        if (label->parts->fill == label->parts->arr_size) {
            if (extend_part_array(label->parts) == -1)
                return ENOMEM;
        }

        p = label->parts->part_array + label->parts->fill++;

    } while (gpt_get_part_type(p) != GPT_PTE_UNUSED);

    memcpy(p, partition, sizeof(gpt_entry_t));

    return EOK;
}
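
/*
 * Sketch of the gpt_alloc_partition() + gpt_add_partition() variant.
 * gpt_add_partition() copies the entry into the array, so the caller
 * still owns the allocated partition and frees it afterwards:
 *
 *     gpt_part_t *p = gpt_alloc_partition();
 *     if (p == NULL)
 *         return ENOMEM;
 *     gpt_set_start_lba(p, start);
 *     gpt_set_end_lba(p, end);
 *     int rc = gpt_add_partition(label, p);
 *     gpt_free_partition(p);
 */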

/** Remove partition from array
 * @param label label to remove from
 * @param idx   index of the partition to remove
 *
 * @return EOK on success, ENOMEM on array reduction failure
 *
 * Note: even if this fails, the partition still gets removed; only
 * the array reduction failed.
 */
int gpt_remove_partition(gpt_label_t *label, size_t idx)
{
    if (idx >= label->parts->arr_size)
        return EINVAL;

    /*
     * FIXME!
     * If we allow blank spots, we break the array. If we have more than
     * 128 partitions in the array and then remove something from
     * the first 128 partitions, we would forget to write the last one.
     */
    memset(label->parts->part_array + idx, 0, sizeof(gpt_entry_t));

    if (label->parts->fill > idx)
        label->parts->fill = idx;

    /*
     * FIXME! HOPEFULLY FIXED.
     * We cannot reduce the array so simply. We may have some partitions
     * there since we allow blank spots.
     */
    gpt_part_t *p;

    if (label->parts->fill > GPT_MIN_PART_NUM &&
        label->parts->fill < (label->parts->arr_size / 2) - GPT_IGNORE_FILL_NUM) {
        for (p = gpt_get_partition_at(label, label->parts->arr_size / 2);
            p < label->parts->part_array + label->parts->arr_size; ++p) {
            if (gpt_get_part_type(p) != GPT_PTE_UNUSED)
                return EOK;
        }

        if (reduce_part_array(label->parts) == ENOMEM)
            return ENOMEM;
    }

    return EOK;
}

/** Free partition list
 *
 * @param parts partition list to be freed
 */
void gpt_free_partitions(gpt_partitions_t *parts)
{
    free(parts->part_array);
    free(parts);
}

/** Get partition type by linear search
 * (hopefully this doesn't get slow)
 */
size_t gpt_get_part_type(gpt_part_t *p)
{
    size_t i;

    for (i = 0; gpt_ptypes[i].guid != NULL; i++) {
        if (p->part_type[3] == get_byte(gpt_ptypes[i].guid + 0) &&
            p->part_type[2] == get_byte(gpt_ptypes[i].guid + 2) &&
            p->part_type[1] == get_byte(gpt_ptypes[i].guid + 4) &&
            p->part_type[0] == get_byte(gpt_ptypes[i].guid + 6) &&
            p->part_type[5] == get_byte(gpt_ptypes[i].guid + 8) &&
            p->part_type[4] == get_byte(gpt_ptypes[i].guid + 10) &&
            p->part_type[7] == get_byte(gpt_ptypes[i].guid + 12) &&
            p->part_type[6] == get_byte(gpt_ptypes[i].guid + 14) &&
            p->part_type[8] == get_byte(gpt_ptypes[i].guid + 16) &&
            p->part_type[9] == get_byte(gpt_ptypes[i].guid + 18) &&
            p->part_type[10] == get_byte(gpt_ptypes[i].guid + 20) &&
            p->part_type[11] == get_byte(gpt_ptypes[i].guid + 22) &&
            p->part_type[12] == get_byte(gpt_ptypes[i].guid + 24) &&
            p->part_type[13] == get_byte(gpt_ptypes[i].guid + 26) &&
            p->part_type[14] == get_byte(gpt_ptypes[i].guid + 28) &&
            p->part_type[15] == get_byte(gpt_ptypes[i].guid + 30))
            break;
    }

    return i;
}
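
/*
 * Note: the returned value is an index into gpt_ptypes[]; callers in this
 * file compare it against GPT_PTE_UNUSED to detect empty entries. When the
 * GUID is not found, the index of the terminating (NULL guid) entry is
 * returned.
 */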

/** Set partition type
 * @param p    partition to be set
 * @param type partition type to set
 *             - see our fine selection at gpt_ptypes to choose from
 */
void gpt_set_part_type(gpt_part_t *p, size_t type)
{
    /*
     * gpt_ptypes[].guid is a hexadecimal string, so parse byte pairs with
     * get_byte(). Beware: the first 3 groups are byte-swapped (mixed
     * endianity), mirroring gpt_get_part_type().
     */
    p->part_type[3] = get_byte(gpt_ptypes[type].guid + 0);
    p->part_type[2] = get_byte(gpt_ptypes[type].guid + 2);
    p->part_type[1] = get_byte(gpt_ptypes[type].guid + 4);
    p->part_type[0] = get_byte(gpt_ptypes[type].guid + 6);

    p->part_type[5] = get_byte(gpt_ptypes[type].guid + 8);
    p->part_type[4] = get_byte(gpt_ptypes[type].guid + 10);

    p->part_type[7] = get_byte(gpt_ptypes[type].guid + 12);
    p->part_type[6] = get_byte(gpt_ptypes[type].guid + 14);

    p->part_type[8] = get_byte(gpt_ptypes[type].guid + 16);
    p->part_type[9] = get_byte(gpt_ptypes[type].guid + 18);
    p->part_type[10] = get_byte(gpt_ptypes[type].guid + 20);
    p->part_type[11] = get_byte(gpt_ptypes[type].guid + 22);
    p->part_type[12] = get_byte(gpt_ptypes[type].guid + 24);
    p->part_type[13] = get_byte(gpt_ptypes[type].guid + 26);
    p->part_type[14] = get_byte(gpt_ptypes[type].guid + 28);
    p->part_type[15] = get_byte(gpt_ptypes[type].guid + 30);
}

/** Get partition starting LBA */
uint64_t gpt_get_start_lba(gpt_part_t *p)
{
    return uint64_t_le2host(p->start_lba);
}

/** Set partition starting LBA */
void gpt_set_start_lba(gpt_part_t *p, uint64_t start)
{
    p->start_lba = host2uint64_t_le(start);
}

/** Get partition ending LBA */
uint64_t gpt_get_end_lba(gpt_part_t *p)
{
    return uint64_t_le2host(p->end_lba);
}

/** Set partition ending LBA */
void gpt_set_end_lba(gpt_part_t *p, uint64_t end)
{
    p->end_lba = host2uint64_t_le(end);
}

/** Get partition name */
unsigned char *gpt_get_part_name(gpt_part_t *p)
{
    return p->part_name;
}

/** Copy partition name */
void gpt_set_part_name(gpt_part_t *p, char *name, size_t length)
{
    if (length >= 72)
        length = 71;

    memcpy(p->part_name, name, length);
    p->part_name[length] = '\0';
}

/** Get partition attribute */
bool gpt_get_flag(gpt_part_t *p, GPT_ATTR flag)
{
    return (p->attributes & (((uint64_t) 1) << flag)) ? 1 : 0;
}

/** Set partition attribute */
void gpt_set_flag(gpt_part_t *p, GPT_ATTR flag, bool value)
{
    uint64_t attr = p->attributes;

    if (value)
        attr = attr | (((uint64_t) 1) << flag);
    else
        attr = attr ^ (attr & (((uint64_t) 1) << flag));

    p->attributes = attr;
}
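
/*
 * Example (a sketch only; `some_attr` is a placeholder for one of the
 * GPT_ATTR values, see libgpt.h):
 *
 *     gpt_set_flag(p, some_attr, true);          // set the attribute bit
 *     bool is_set = gpt_get_flag(p, some_attr);  // -> true
 *     gpt_set_flag(p, some_attr, false);         // clear it again
 */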

/** Generate a new pseudo-random UUID
 * @param uuid Pointer to the UUID to overwrite.
 */
void gpt_set_random_uuid(uint8_t *uuid)
{
    srandom((unsigned int) (size_t) uuid);

    unsigned int i;
    for (i = 0; i < 16 / sizeof(long int); ++i)
        ((long int *) uuid)[i] = random();
}

/** Get next aligned address */
uint64_t gpt_get_next_aligned(uint64_t addr, unsigned int alignment)
{
    uint64_t div = addr / alignment;
    return (div + 1) * alignment;
}
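
/*
 * Note: the result is the next multiple of `alignment` strictly above
 * `addr`; e.g. gpt_get_next_aligned(34, 8) == 40, and an already aligned
 * address is moved to the following boundary: gpt_get_next_aligned(40, 8) == 48.
 */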

/* Internal functions follow */

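/*
 * Read a GPT header from the given block address and verify it: check the
 * EFI signature, the header CRC32 and that the rest of the block is zeroed.
 * Returns EOK, EINVAL, EBADCHECKSUM or a libblock error code.
 */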
static int load_and_check_header(service_id_t dev_handle, aoff64_t addr, size_t b_size, gpt_header_t *header)
{
    int rc;

    rc = block_read_direct(dev_handle, addr, GPT_HDR_BS, header);
    if (rc != EOK)
        return rc;

    unsigned int i;
    /* Check the EFI signature */
    for (i = 0; i < 8; ++i) {
        if (header->efi_signature[i] != efi_signature[i])
            return EINVAL;
    }

    /* Check the CRC32 of the header (computed with the CRC field zeroed) */
    uint32_t crc = uint32_t_le2host(header->header_crc32);
    header->header_crc32 = 0;
    if (crc != compute_crc32((uint8_t *) header, uint32_t_le2host(header->header_size)))
        return EBADCHECKSUM;
    else
        header->header_crc32 = host2uint32_t_le(crc);

    /* Check for zeroes in the rest of the block */
    for (i = sizeof(gpt_header_t); i < b_size; ++i) {
        if (((uint8_t *) header)[i] != 0)
            return EINVAL;
    }

    return EOK;
}

static gpt_partitions_t *alloc_part_array(uint32_t num)
{
    gpt_partitions_t *res = malloc(sizeof(gpt_partitions_t));
    if (res == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    uint32_t size = num > GPT_BASE_PART_NUM ? num : GPT_BASE_PART_NUM;
    res->part_array = malloc(size * sizeof(gpt_entry_t));
    if (res->part_array == NULL) {
        free(res);
        errno = ENOMEM;
        return NULL;
    }

    memset(res->part_array, 0, size * sizeof(gpt_entry_t));

    res->fill = 0;
    res->arr_size = num;

    return res;
}

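/* Double the capacity of the partition array, keeping the existing entries. */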
static int extend_part_array(gpt_partitions_t *p)
{
    size_t nsize = p->arr_size * 2;
    gpt_entry_t *tmp = malloc(nsize * sizeof(gpt_entry_t));
    if (tmp == NULL) {
        errno = ENOMEM;
        return -1;
    }

    memcpy(tmp, p->part_array, p->fill * sizeof(gpt_entry_t));
    free(p->part_array);
    p->part_array = tmp;
    p->arr_size = nsize;

    return 0;
}

static int reduce_part_array(gpt_partitions_t *p)
{
    if (p->arr_size > GPT_MIN_PART_NUM) {
        unsigned int nsize = p->arr_size / 2;
        nsize = nsize > GPT_MIN_PART_NUM ? nsize : GPT_MIN_PART_NUM;
        gpt_entry_t *tmp = malloc(nsize * sizeof(gpt_entry_t));
        if (tmp == NULL)
            return ENOMEM;

        /* Copy at most nsize entries */
        memcpy(tmp, p->part_array,
            (p->fill < nsize ? p->fill : nsize) * sizeof(gpt_entry_t));
        free(p->part_array);
        p->part_array = tmp;
        p->arr_size = nsize;
    }

    return 0;
}

/* Parse a byte from a hexadecimal string,
 * e.g. "FF" => 255
 */
static uint8_t get_byte(const char *c)
{
    uint8_t val = 0;
    char hex[3] = { *c, *(c + 1), 0 };

    errno = str_uint8_t(hex, NULL, 16, false, &val);
    return val;
}

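/*
 * Return true when the two partitions share at least one block; the LBA
 * ranges are treated as inclusive on both ends.
 */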
static bool check_overlap(gpt_part_t *p1, gpt_part_t *p2)
{
    if (gpt_get_start_lba(p1) < gpt_get_start_lba(p2) &&
        gpt_get_end_lba(p1) < gpt_get_start_lba(p2)) {
        return false;
    } else if (gpt_get_start_lba(p1) > gpt_get_start_lba(p2) &&
        gpt_get_end_lba(p2) < gpt_get_start_lba(p1)) {
        return false;
    }

    return true;
}

static bool check_encaps(gpt_part_t *p, uint64_t n_blocks, uint64_t first_lba)
{
    /*
     * We allow "<=" in the second expression because the backup GPT area
     * at the end of the device has no protective MBR, so it is one block
     * smaller than the area at the beginning.
     */
    if (gpt_get_start_lba(p) >= first_lba && gpt_get_end_lba(p) <= n_blocks - first_lba)
        return true;

    return false;
}