Changeset 283ea3d in mainline for uspace/lib/gpt/libgpt.c
- Timestamp: 2013-07-28T22:52:16Z (12 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e3bc355
- Parents: 8559fa0 (diff), 675de6d (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.

File:
- 1 edited
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
uspace/lib/gpt/libgpt.c
--- uspace/lib/gpt/libgpt.c (r8559fa0)
+++ uspace/lib/gpt/libgpt.c (r283ea3d)

 #include "libgpt.h"
 
-static int load_and_check_header(service_id_t handle, aoff64_t addr, size_t b_size, gpt_header_t *header);
-static gpt_partitions_t * alloc_part_array(uint32_t num);
+static int load_and_check_header(service_id_t, aoff64_t, size_t, gpt_header_t *);
+static gpt_partitions_t * alloc_part_array(uint32_t);
 static int extend_part_array(gpt_partitions_t *);
 static int reduce_part_array(gpt_partitions_t *);
-static long long nearest_larger_int(double a);
+//static long long nearest_larger_int(double);
 static uint8_t get_byte(const char *);
-static int check_overlap(gpt_part_t * p1, gpt_part_t * p2);
+static bool check_overlap(gpt_part_t *, gpt_part_t *);
 
 /** Allocate memory for gpt label */
…
         return NULL;
 
+    /* This is necessary so that gpt_part_foreach does not segfault */
+    label->parts = gpt_alloc_partitions();
+    if (label == NULL) {
+        free(label);
+        return NULL;
+    }
+
     label->gpt = NULL;
-    label->parts = NULL;
+
     label->device = 0;
 
…
         return NULL;
 
-    // We might need only sizeof(gpt_header_t),
-    // but we should follow specs and have
-    // zeroes through all the rest of the block
+    /*
+     * We might need only sizeof(gpt_header_t), but we should follow
+     * specs and have zeroes through all the rest of the block
+     */
     size_t final_size = size > sizeof(gpt_header_t) ? size : sizeof(gpt_header_t);
     gpt->header = malloc(final_size);
…
     int rc;
     size_t b_size;
 
-    label->gpt->header->header_crc32 = 0;
-    label->gpt->header->header_crc32 = compute_crc32((uint8_t *) label->gpt->header,
-        uint32_t_le2host(label->gpt->header->header_size));
-
-    rc = block_init(EXCHANGE_ATOMIC, dev_handle, b_size);
+    /* The comm_size argument (the last one) is ignored */
+    rc = block_init(EXCHANGE_ATOMIC, dev_handle, 4096);
     if (rc != EOK && rc != EEXIST)
         return rc;
 
     rc = block_get_bsize(dev_handle, &b_size);
     if (rc != EOK)
         return rc;
 
-    /* Write to main GPT header location */
-    rc = block_write_direct(dev_handle, GPT_HDR_BA, GPT_HDR_BS, label->gpt->header);
-    if (rc != EOK) {
-        block_fini(dev_handle);
-        return rc;
-    }
-
     aoff64_t n_blocks;
     rc = block_get_nblocks(dev_handle, &n_blocks);
…
         return rc;
     }
 
+    uint64_t tmp;
+
+    /* Prepare the backup header */
+    label->gpt->header->alternate_lba = label->gpt->header->my_lba;
+    label->gpt->header->my_lba = host2uint64_t_le(n_blocks - 1);
+
+    tmp = label->gpt->header->entry_lba;
+    label->gpt->header->entry_lba = host2uint64_t_le(n_blocks -
+        (uint32_t_le2host(label->gpt->header->fillries) * sizeof(gpt_entry_t))
+        / b_size - 1);
+
+    label->gpt->header->header_crc32 = 0;
+    label->gpt->header->header_crc32 = host2uint32_t_le(
+        compute_crc32((uint8_t *) label->gpt->header,
+        uint32_t_le2host(label->gpt->header->header_size)));
+
     /* Write to backup GPT header location */
-    //FIXME: those idiots thought it would be cool to have these fields in reverse order...
     rc = block_write_direct(dev_handle, n_blocks - 1, GPT_HDR_BS, label->gpt->header);
+    if (rc != EOK) {
+        block_fini(dev_handle);
+        return rc;
+    }
+
+    /* Prepare the main header */
+    label->gpt->header->entry_lba = tmp;
+
+    tmp = label->gpt->header->alternate_lba;
+    label->gpt->header->alternate_lba = label->gpt->header->my_lba;
+    label->gpt->header->my_lba = tmp;
+
+    label->gpt->header->header_crc32 = 0;
+    label->gpt->header->header_crc32 = host2uint32_t_le(
+        compute_crc32((uint8_t *) label->gpt->header,
+        uint32_t_le2host(label->gpt->header->header_size)));
+
+    /* Write to main GPT header location */
+    rc = block_write_direct(dev_handle, GPT_HDR_BA, GPT_HDR_BS, label->gpt->header);
     block_fini(dev_handle);
     if (rc != EOK)
         return rc;
 
     return 0;
 }
…
 gpt_partitions_t * gpt_alloc_partitions()
 {
-    return alloc_part_array(128);
+    return alloc_part_array(GPT_MIN_PART_NUM);
 }
 
…
     int rc;
     unsigned int i;
-    uint32_t fill = uint32_t_le2host(label->gpt->header->fillries);
+    uint32_t fillries = uint32_t_le2host(label->gpt->header->fillries);
     uint32_t ent_size = uint32_t_le2host(label->gpt->header->entry_size);
     uint64_t ent_lba = uint64_t_le2host(label->gpt->header->entry_lba);
 
     if (label->parts == NULL) {
-        label->parts = alloc_part_array(fill);
+        label->parts = alloc_part_array(fillries);
         if (label->parts == NULL) {
             return ENOMEM;
…
     }
 
-    /* We can limit comm_size like this:
-     * - we don't need more bytes
-     * - the size of GPT partition entry can be different to 128 bytes */
     /* comm_size is ignored */
     rc = block_init(EXCHANGE_SERIALIZE, label->device, sizeof(gpt_entry_t));
…
     aoff64_t pos = ent_lba * block_size;
 
-    /* Now we read just sizeof(gpt_entry_t) bytes for each entry from the device.
+    /*
+     * Now we read just sizeof(gpt_entry_t) bytes for each entry from the device.
      * Hopefully, this does not bypass cache (no mention in libblock.c),
      * and also allows us to have variable partition entry size (but we
      * will always read just sizeof(gpt_entry_t) bytes - hopefully they
-     * don't break backward compatibility) */
-    for (i = 0; i < fill; ++i) {
-        //FIXME: this does bypass cache...
+     * don't break backward compatibility)
+     */
+    for (i = 0; i < fillries; ++i) {
+        /*FIXME: this does bypass cache... */
         rc = block_read_bytes_direct(label->device, pos, sizeof(gpt_entry_t), label->parts->part_array + i);
-        //FIXME: but seqread() is just too complex...
-        //rc = block_seqread(gpt->device, &bufpos, &buflen, &pos, res->part_array[i], sizeof(gpt_entry_t));
+        /*
+         * FIXME: but seqread() is just too complex...
+         * rc = block_seqread(gpt->device, &bufpos, &buflen, &pos, res->part_array[i], sizeof(gpt_entry_t));
+         */
         pos += ent_size;
 
…
     }
 
-    /* FIXME: so far my boasting about variable partition entry size
+    /*
+     * FIXME: so far my boasting about variable partition entry size
      * will not work. The CRC32 checksums will be different.
      * This can't be fixed easily - we'd have to run the checksum
…
      */
     uint32_t crc = compute_crc32((uint8_t *) label->parts->part_array,
-        label->parts->fill * sizeof(gpt_entry_t));
+        fillries * sizeof(gpt_entry_t));
 
     if(uint32_t_le2host(label->gpt->header->pe_array_crc32) != crc)
…
     size_t b_size;
     uint32_t e_size = uint32_t_le2host(label->gpt->header->entry_size);
-    size_t fill = label->parts->fill > GPT_MIN_PART_NUM ? label->parts->fill : GPT_MIN_PART_NUM;
-
-    label->gpt->header->pe_array_crc32 = compute_crc32(
+    size_t fillries = label->parts->fill > GPT_MIN_PART_NUM ?
+        label->parts->fill : GPT_MIN_PART_NUM;
+
+    label->gpt->header->fillries = host2uint32_t_le(fillries);
+    label->gpt->header->pe_array_crc32 = host2uint32_t_le(compute_crc32(
         (uint8_t *) label->parts->part_array,
-        fill * e_size);
+        fillries * e_size));
 
     /* comm_size of 4096 is ignored */
…
         goto fail;
 
+    uint64_t arr_blocks = (fillries * sizeof(gpt_entry_t)) / b_size;
+    label->gpt->header->first_usable_lba = host2uint64_t_le(arr_blocks + 1);
+    label->gpt->header->last_usable_lba = host2uint64_t_le(n_blocks - arr_blocks - 2);
+
     /* Write to backup GPT partition array location */
-    //rc = block_write_direct(dev_handle, n_blocks - 1, GPT_HDR_BS, header->raw_data);
+    rc = block_write_direct(dev_handle, n_blocks - arr_blocks - 1,
+        arr_blocks, label->parts->part_array);
     if (rc != EOK)
         goto fail;
…
     /* Write to main GPT partition array location */
     rc = block_write_direct(dev_handle, uint64_t_le2host(label->gpt->header->entry_lba),
-        nearest_larger_int((uint64_t_le2host(label->gpt->header->entry_size) * label->parts->fill) / b_size),
-        label->parts->part_array);
+        arr_blocks, label->parts->part_array);
     if (rc != EOK)
         goto fail;
…
 {
     gpt_part_t *p;
+
 
     /* Find the first empty entry */
…
 int gpt_add_partition(gpt_label_t *label, gpt_part_t *partition)
 {
-    if (label->parts->fill == label->parts->arr_size) {
-        if (extend_part_array(label->parts) == -1)
-            return ENOMEM;
-    }
-
-    /*FIXME:
-     * Check dimensions and stuff! */
+    /* FIXME: Check dimensions! */
     gpt_part_foreach(label, p) {
         if (gpt_get_part_type(p) != GPT_PTE_UNUSED) {
…
     }
 
-    memcpy(label->parts->part_array + label->parts->fill++,
-        partition, sizeof(gpt_part_t));
-
+    gpt_part_t *p;
+    /* Find the first empty entry */
+    do {
+        if (label->parts->fill == label->parts->arr_size) {
+            if (extend_part_array(label->parts) == -1)
+                return ENOMEM;
+        }
+
+        p = label->parts->part_array + label->parts->fill++;
+
+    } while (gpt_get_part_type(p) != GPT_PTE_UNUSED);
+
+    memcpy(p, partition, sizeof(gpt_entry_t));
 
…
 int gpt_remove_partition(gpt_label_t *label, size_t idx)
 {
-    if (idx >= label->parts->fill)
+    if (idx >= label->parts->arr_size)
         return EINVAL;
 
-    /* FIXME!
+    /*
+     * FIXME!
      * If we allow blank spots, we break the array. If we have more than
      * 128 partitions in the array and then remove something from
-     * the first 128 partitions, we would forget to write the last one.*/
+     * the first 128 partitions, we would forget to write the last one.
+     */
     memset(label->parts->part_array + idx, 0, sizeof(gpt_entry_t));
 
-    label->parts->fill -= 1;
-
-    /* FIXME! HOPEFULLY FIXED.
+    if (label->parts->fill > idx)
+        label->parts->fill = idx;
+
+    /*
+     * FIXME! HOPEFULLY FIXED.
      * We cannot reduce the array so simply. We may have some partitions
-     * there since we allow blank spots. */
+     * there since we allow blank spots.
+     */
     gpt_part_t * p;
-    if (label->parts->fill < (label->parts->arr_size / 2) - GPT_IGNORE_FILL_NUM) {
+
+    if (label->parts->fill > GPT_MIN_PART_NUM &&
+        label->parts->fill < (label->parts->arr_size / 2) - GPT_IGNORE_FILL_NUM) {
         for (p = gpt_get_partition_at(label, label->parts->arr_size / 2);
             p < label->parts->part_array + label->parts->arr_size; ++p) {
…
 }
 
-// Internal functions follow //
+/** Generate a new pseudo-random UUID
+ * @param uuid Pointer to the UUID to overwrite.
+ */
+void gpt_set_random_uuid(uint8_t * uuid)
+{
+    srandom((unsigned int) uuid);
+
+    unsigned int i;
+    for (i = 0; i < 16/sizeof(long int); ++i)
+        ((long int *)uuid)[i] = random();
+
+}
+
+/** Get next aligned address */
+uint64_t gpt_get_next_aligned(uint64_t addr, unsigned int alignment)
+{
+    uint64_t div = addr / alignment;
+    return (div + 1) * alignment;
+}
+
+/* Internal functions follow */
 
 static int load_and_check_header(service_id_t dev_handle, aoff64_t addr, size_t b_size, gpt_header_t * header)
…
         return NULL;
     }
 
     uint32_t size = num > GPT_BASE_PART_NUM ? num : GPT_BASE_PART_NUM;
     res->part_array = malloc(size * sizeof(gpt_entry_t));
…
         return NULL;
     }
 
-    res->fill = num;
-    res->arr_size = size;
+    memset(res->part_array, 0, size * sizeof(gpt_entry_t));
+
+    res->fill = 0;
+    res->arr_size = num;
 
     return res;
…
 static int extend_part_array(gpt_partitions_t * p)
 {
-    unsigned int nsize = p->arr_size * 2;
+    size_t nsize = p->arr_size * 2;
     gpt_entry_t * tmp = malloc(nsize * sizeof(gpt_entry_t));
     if(tmp == NULL) {
…
         return -1;
     }
 
-    memcpy(tmp, p->part_array, p->fill);
+    memcpy(tmp, p->part_array, p->fill * sizeof(gpt_entry_t));
     free(p->part_array);
     p->part_array = tmp;
…
         return ENOMEM;
 
-    memcpy(tmp, p->part_array, p->fill < nsize ? p->fill
+    memcpy(tmp, p->part_array, p->fill < nsize ? p->fill : nsize);
     free(p->part_array);
     p->part_array = tmp;
…
 }
 
-//FIXME: replace this with a library call, if it exists
-static long long nearest_larger_int(double a)
+/*static long long nearest_larger_int(double a)
 {
     if ((long long) a == a) {
…
 
     return ((long long) a) + 1;
-}
+}*/
 
+/* Parse a byte from a string in hexadecimal
+ * i.e., "FF" => 255
+ */
 static uint8_t get_byte(const char * c)
 {
…
 }
 
-static int check_overlap(gpt_part_t * p1, gpt_part_t * p2)
+static bool check_overlap(gpt_part_t * p1, gpt_part_t * p2)
 {
     if (gpt_get_start_lba(p1) < gpt_get_start_lba(p2) && gpt_get_end_lba(p1) <= gpt_get_start_lba(p2)) {
-        return 0;
+        return false;
     } else if (gpt_get_start_lba(p1) > gpt_get_start_lba(p2) && gpt_get_end_lba(p2) <= gpt_get_start_lba(p1)) {
-        return 0;
+        return false;
     }
 
-    return 1;
+    return true;
 }
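For reference, the placement arithmetic introduced above for the backup GPT structures (backup header in the last block, backup entry array immediately before it, and the usable LBA range set in gpt_write_partitions()) can be checked with a small standalone program. The following is only an illustrative sketch that mirrors the arithmetic from the diff; the disk geometry, the 128-byte entry size, and the variable names are assumptions made for this example, not part of libgpt:

/*
 * Illustrative sketch only -- not part of the changeset. It mirrors the
 * backup-placement arithmetic from gpt_write_gpt_header() and
 * gpt_write_partitions(); the geometry below is an assumed example.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 128          /* assumed sizeof(gpt_entry_t) */

int main(void)
{
    uint64_t n_blocks = 2097152;    /* assumed: 1 GiB disk, 512 B blocks */
    uint64_t b_size = 512;          /* assumed block size */
    uint64_t fillries = 128;        /* number of partition entries */

    /* Blocks occupied by the partition entry array */
    uint64_t arr_blocks = (fillries * ENTRY_SIZE) / b_size;

    /* Backup header goes into the very last block */
    uint64_t backup_hdr_lba = n_blocks - 1;
    /* Backup entry array sits immediately before the backup header */
    uint64_t backup_arr_lba = n_blocks - arr_blocks - 1;

    /* Usable range as computed in gpt_write_partitions() */
    uint64_t first_usable = arr_blocks + 1;
    uint64_t last_usable = n_blocks - arr_blocks - 2;

    printf("entry array blocks: %llu\n", (unsigned long long) arr_blocks);
    printf("backup header:      LBA %llu\n", (unsigned long long) backup_hdr_lba);
    printf("backup array:       LBA %llu\n", (unsigned long long) backup_arr_lba);
    printf("usable range:       LBA %llu .. %llu\n",
        (unsigned long long) first_usable, (unsigned long long) last_usable);

    return 0;
}

With 128 entries of 128 bytes on 512-byte blocks, arr_blocks comes out to 32, so the backup header lands on the last LBA and the backup entry array occupies the 32 blocks before it, matching the n_blocks - arr_blocks - 1 offset used in the backup write above.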