Changes in / [3dbe4ca2:933cadf] in mainline
- Location: uspace/srv/fs/fat
- Files: 3 edited
Legend:
  unchanged context
+ added in r933cadf
- removed (present only in r3dbe4ca2)
uspace/srv/fs/fat/fat_fat.c
r3dbe4ca2 → r933cadf

A number of hunks in this file only adjust whitespace (trailing blanks in the
file header comment, the allocation-lock comment, the doc comments of
fat_cluster_walk() and _fat_block_get(), and a few statements in
fat_block_get(), fat_fill_gap() and fat_sanity_check(), plus the closing @}
comment); they are not repeated below.

The convenience macros RDS() and SSA() are defined locally again (new lines
56-58), just above CLBN2PBN():

+ #define RDS(bs)   ((sizeof(fat_dentry_t) * RDE((bs))) / BPS((bs))) + \
+                   (((sizeof(fat_dentry_t) * RDE((bs))) % BPS((bs))) != 0)
+ #define SSA(bs)   (RSCNT((bs)) + FATCNT((bs)) * SF((bs)) + RDS(bs))

fat_cluster_walk() no longer calls fat_get_cluster() and drops the run-time
FAT_CLST_LAST1(bs)/FAT_CLST_BAD(bs) macros. It now computes the FAT1 sector
and entry index of the current cluster itself and reads that sector directly:

-       fat_cluster_t clst = firstc, clst_last1 = FAT_CLST_LAST1(bs);
-       fat_cluster_t clst_bad = FAT_CLST_BAD(bs);
+       block_t *b;
+       fat_cluster_t clst = firstc;
        ...
-       while (clst < clst_last1 && clusters < max_clusters) {
+       while (clst < FAT_CLST_LAST1 && clusters < max_clusters) {
+               aoff64_t fsec;  /* sector offset relative to FAT1 */
+               unsigned fidx;  /* FAT1 entry index */
                assert(clst >= FAT_CLST_FIRST);
                if (lastc)
                        *lastc = clst;  /* remember the last cluster number */
+               fsec = (clst * sizeof(fat_cluster_t)) / BPS(bs);
+               fidx = clst % (BPS(bs) / sizeof(fat_cluster_t));
                /* read FAT1 */
-               rc = fat_get_cluster(bs, devmap_handle, FAT1, clst, &clst);
-               if (rc != EOK)
-                       return rc;
-               assert(clst != clst_bad);
+               rc = block_get(&b, devmap_handle, RSCNT(bs) + fsec,
+                   BLOCK_FLAGS_NONE);
+               if (rc != EOK)
+                       return rc;
+               clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
+               assert(clst != FAT_CLST_BAD);
+               rc = block_put(b);
+               if (rc != EOK)
+                       return rc;
                clusters++;
        }
-       if (lastc && clst < clst_last1)
+       if (lastc && clst < FAT_CLST_LAST1)
                *lastc = clst;
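The fsec/fidx arithmetic above is simple but easy to transpose, so here is a
minimal standalone sketch of the same computation (not HelenOS code; the
function names fat16_entry_sector()/fat16_entry_index() and the rscnt/bps
parameters are illustrative). It assumes 16-bit FAT entries, i.e. FAT16:

    #include <stdint.h>

    typedef uint16_t fat_cluster_t;

    /*
     * Sketch: device-relative sector number of the FAT1 sector that holds
     * the entry for cluster "clst".  "rscnt" is the reserved sector count
     * (FAT1 starts right after it), "bps" is bytes per sector.
     */
    static inline uint64_t
    fat16_entry_sector(uint64_t rscnt, uint32_t bps, fat_cluster_t clst)
    {
            return rscnt + ((uint64_t)clst * sizeof(fat_cluster_t)) / bps;
    }

    /* Sketch: index of the entry for "clst" within that sector. */
    static inline unsigned
    fat16_entry_index(uint32_t bps, fat_cluster_t clst)
    {
            return clst % (bps / sizeof(fat_cluster_t));
    }

With 512-byte sectors a FAT sector holds 256 entries, so cluster 0x1234 lands
in FAT1 sector 0x12 (i.e. device sector RSCNT + 0x12) at index 0x34.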
fat_get_cluster() loses its FAT12 branch. The old code computed a byte offset
of clst + clst/2, read a second block (b1) when the 12-bit entry straddled a
sector boundary (returning ERANGE past the last FAT sector), merged the two
bytes, and then shifted or masked the value depending on whether the cluster
number was odd or even. The new code assumes 16-bit entries throughout:

-       block_t *b, *b1;
-       aoff64_t offset;
+       block_t *b;
+       fat_cluster_t *cp;
        int rc;

-       assert(fatno < FATCNT(bs));
-
-       if (FAT_IS_FAT12(bs))
-               offset = (clst + clst/2);
-       else
-               offset = (clst * sizeof(fat_cluster_t));
-
        rc = block_get(&b, devmap_handle, RSCNT(bs) + SF(bs) * fatno +
-           offset / BPS(bs), BLOCK_FLAGS_NONE);
+           (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
        if (rc != EOK)
                return rc;
        [the ~40 removed lines handling the sector-spanning FAT12 case are
        summarized above]
+       cp = (fat_cluster_t *)b->data +
+           clst % (BPS(bs) / sizeof(fat_cluster_t));
+       *value = uint16_t_le2host(*cp);
        rc = block_put(b);
        return rc;
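For reference, the packing the removed branches had to cope with: FAT12
entries are 12 bits wide, two entries share three bytes, and the 16-bit word
holding an entry straddles a sector boundary whenever its byte offset is the
last byte of a sector, which is exactly the case the extra block_get()/
block_put() pair handled. A minimal sketch against a contiguous in-memory
copy of the FAT (not HelenOS code; the function names and the flat "fat"
buffer are illustrative, and the mirror-image write masking removed from
fat_set_cluster() in the next hunk is included for completeness):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Sketch: FAT12 entry n starts at byte offset n + n/2.  Even entries
     * occupy the low 12 bits of the little-endian 16-bit word at that
     * offset, odd entries the high 12 bits.
     */
    static uint16_t
    fat12_get_entry(const uint8_t *fat, uint32_t clst)
    {
            size_t off = clst + clst / 2;
            uint16_t word = (uint16_t)fat[off] | ((uint16_t)fat[off + 1] << 8);

            return (clst & 1) ? (word >> 4) : (word & 0x0fff);
    }

    static void
    fat12_set_entry(uint8_t *fat, uint32_t clst, uint16_t value)
    {
            size_t off = clst + clst / 2;
            uint16_t word = (uint16_t)fat[off] | ((uint16_t)fat[off + 1] << 8);

            if (clst & 1)
                    word = (uint16_t)((word & 0x000f) | ((value & 0x0fff) << 4));
            else
                    word = (uint16_t)((word & 0xf000) | (value & 0x0fff));

            fat[off] = (uint8_t)(word & 0xff);
            fat[off + 1] = (uint8_t)(word >> 8);
    }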
fat_set_cluster() is reduced in the same way: the aoff64_t offset, the second
block b1, the temporary used when the entry spans two sectors, the odd/even
12-bit masking and the ERANGE return for the last FAT sector are all gone.
What remains after the (unchanged) assert and block_get() is the plain
16-bit store:

+       cp = (fat_cluster_t *)b->data +
+           clst % (BPS(bs) / sizeof(fat_cluster_t));
+       *cp = host2uint16_t_le(value);
+       b->dirty = true;        /* need to sync block */
        rc = block_put(b);
        return rc;

fat_alloc_shadow_clusters() drops its local clst_last1 variable and
terminates the chain with the fixed constant again:

                rc = fat_set_cluster(bs, devmap_handle, fatno, lifo[c],
-                   c == 0 ? clst_last1 : lifo[c - 1]);
+                   c == 0 ? FAT_CLST_LAST1 : lifo[c - 1]);
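Note the direction in which fat_alloc_shadow_clusters() (and the FAT1 scan in
fat_alloc_clusters() below) links the newly found clusters: each one is made
to point at the cluster found before it, so the first cluster pushed onto the
lifo stack becomes the tail of the chain and the last one its head, which is
why the callers hand back *mcl = lifo[found - 1] and *lcl = lifo[0]. A
minimal sketch over a plain array standing in for one FAT copy (not HelenOS
code; link_lifo_chain() and its parameters are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint16_t fat_cluster_t;

    #define FAT_CLST_LAST1  0xfff8

    /*
     * Sketch: link the clusters collected in "lifo" into a chain inside a
     * FAT given as a plain array, the same way fat_alloc_shadow_clusters()
     * does through fat_set_cluster().  Afterwards lifo[nclsts - 1] is the
     * head of the chain and lifo[0] its tail, terminated by FAT_CLST_LAST1.
     */
    static void
    link_lifo_chain(fat_cluster_t *fat, const fat_cluster_t *lifo,
        size_t nclsts)
    {
            for (size_t c = 0; c < nclsts; c++)
                    fat[lifo[c]] = (c == 0) ? FAT_CLST_LAST1 : lifo[c - 1];
    }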
fat_alloc_clusters() is rewritten. Instead of probing every cluster through
fat_get_cluster()/fat_set_cluster() while clst < CC(bs) + 2, the new version
walks the sectors of FAT1 with block_get(), examines the 16-bit entries in
place, and marks a found cluster used by writing the link directly into the
cached block:

-       for (clst = FAT_CLST_FIRST; clst < CC(bs) + 2 && found < nclsts; clst++) {
-               rc = fat_get_cluster(bs, devmap_handle, FAT1, clst, &value);
                ...
+       for (b = 0, cl = 0; b < SF(bs); b++) {
+               rc = block_get(&blk, devmap_handle, RSCNT(bs) + b,
+                   BLOCK_FLAGS_NONE);
                ...
+               for (c = 0; c < BPS(bs) / sizeof(fat_cluster_t); c++, cl++) {

Before testing an entry, the new loop checks that the candidate cluster is
physically present on the device and stops the scan once
CLBN2PBN(bs, cl, SPC(bs) - 1) reaches TS(bs); this matters when the file
system was created with fewer total sectors than the size of the FAT implies,
or when the last cluster would end beyond the end of the device. A free entry
(FAT_CLST_RES0) is pushed onto the lifo stack and overwritten with either
FAT_CLST_LAST1 (for the first cluster found) or the previously found cluster
number, the block is marked dirty, and once nclsts clusters have been
collected the shadow FATs are updated and *mcl/*lcl are returned. On failure
or when the scan runs out of FAT, the clusters allocated so far are rolled
back with fat_set_cluster(..., FAT_CLST_RES0) and ENOSPC (or the error code)
is returned; the old version performed the same rollback, but while still
holding fat_alloc_lock, whereas the new one releases the lock first.

fat_free_clusters(), fat_append_clusters() and fat_chop_clusters() merely
replace the run-time FAT_CLST_LAST1(bs)/FAT_CLST_BAD(bs) macros (and the
local variables caching them) with the plain FAT_CLST_LAST1/FAT_CLST_BAD
constants, plus a re-wrapped argument list in fat_append_clusters().

In fat_sanity_check(), the FAT signature test is no longer skipped for FAT12
volumes:

-               if (!FAT_IS_FAT12(bs) && ((e0 >> 8) != 0xff || e1 != 0xffff))
+               if ((e0 >> 8) != 0xff || e1 != 0xffff)
                        return ENOTSUP;
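The physical-presence test added to fat_alloc_clusters() above is compact but
worth expanding: with CLBN2PBN() spelled out, it asks whether the last sector
of candidate cluster cl still lies below the total sector count. A minimal
sketch with the boot-sector values passed explicitly (not HelenOS code;
fat_cluster_on_device() and its parameter names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define FAT_CLST_FIRST  0x0002

    /*
     * Sketch: does cluster "cl" lie entirely within the device?
     * "ssa" is the number of sectors before the data area (reserved
     * sectors + all FATs + root directory), "spc" is sectors per cluster
     * and "ts" the total sector count from the boot sector.  This is
     * CLBN2PBN(bs, cl, SPC(bs) - 1) < TS(bs) with the macros expanded.
     * The caller guarantees cl >= FAT_CLST_FIRST, as fat_alloc_clusters()
     * does.
     */
    static bool
    fat_cluster_on_device(uint64_t ssa, uint32_t spc, uint64_t ts, uint32_t cl)
    {
            /* physical sector number of the cluster's last sector */
            uint64_t last = ssa + (uint64_t)(cl - FAT_CLST_FIRST) * spc +
                (spc - 1);

            return last < ts;
    }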
uspace/srv/fs/fat/fat_fat.h
r3dbe4ca2 → r933cadf

Apart from a whitespace fix in the header comment, the FAT12/FAT16-specific
cluster constants are collapsed back into a single FAT16 set:

- #define FAT_CLST_RES0     0x0000
- #define FAT_CLST_RES1     0x0001
- #define FAT_CLST_FIRST    0x0002
-
- #define FAT12_CLST_BAD    0x0ff7
- #define FAT12_CLST_LAST1  0x0ff8
- #define FAT12_CLST_LAST8  0x0fff
- #define FAT16_CLST_BAD    0xfff7
- #define FAT16_CLST_LAST1  0xfff8
- #define FAT16_CLST_LAST8  0xffff
-
- #define FAT12_CLST_MAX    4085
- #define FAT16_CLST_MAX    65525
+ #define FAT_CLST_RES0     0x0000
+ #define FAT_CLST_RES1     0x0001
+ #define FAT_CLST_FIRST    0x0002
+ #define FAT_CLST_BAD      0xfff7
+ #define FAT_CLST_LAST1    0xfff8
+ #define FAT_CLST_LAST8    0xffff

The block of convenience macros that had been moved here (RDS, SSA, DS, CC,
FAT_IS_FAT12, FAT_IS_FAT16 and the FAT_CLST_LAST1(bs)/FAT_CLST_LAST8(bs)/
FAT_CLST_BAD(bs) selectors) is removed again; RDS() and SSA() now live in
fat_fat.c, see above.
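The dropped FAT12_CLST_MAX/FAT16_CLST_MAX constants and the
FAT_IS_FAT12()/FAT_IS_FAT16() macros built on CC(bs) encoded the standard
rule that the FAT variant follows from the number of data clusters. A minimal
sketch of that rule (not HelenOS code; fat_classify() and its parameters are
illustrative, the thresholds are the removed header values):

    #include <stdint.h>

    /* thresholds from the removed header constants */
    #define FAT12_CLST_MAX  4085
    #define FAT16_CLST_MAX  65525

    enum fat_type { FAT12, FAT16, FAT32 };

    /*
     * Sketch: classify a volume by its cluster count, i.e. the
     * CC(bs) = (TS(bs) - SSA(bs)) / SPC(bs) value that the removed
     * FAT_IS_FAT12()/FAT_IS_FAT16() macros compared against the thresholds.
     */
    static enum fat_type
    fat_classify(uint64_t total_sectors, uint64_t ssa, uint32_t spc)
    {
            uint64_t clusters = (total_sectors - ssa) / spc;

            if (clusters < FAT12_CLST_MAX)
                    return FAT12;
            if (clusters < FAT16_CLST_MAX)
                    return FAT16;
            return FAT32;
    }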
uspace/srv/fs/fat/fat_ops.c
r3dbe4ca2 → r933cadf

Most hunks in this file only strip trailing whitespace or re-join wrapped
lines (in fat_node_sync(), fat_node_get_new(), fat_node_get_core(),
fat_directory traversal, fat_create_node(), fat_link(), fat_unlink(),
fat_has_children(), fat_mounted(), fat_unmounted(), fat_read(), fat_write(),
fat_truncate(), fat_destroy() and fat_sync()). The substantive changes:

fat_node_initialize() seeds the cached cluster values with the generic
constant again:

-       node->lastc_cached_value = FAT16_CLST_LAST1;
+       node->lastc_cached_value = FAT_CLST_LAST1;
        node->currc_cached_valid = false;
        node->currc_cached_bn = 0;
-       node->currc_cached_value = FAT16_CLST_LAST1;
+       node->currc_cached_value = FAT_CLST_LAST1;

fat_mounted() no longer rejects volumes that are neither FAT12 nor FAT16; the
block that answered ENOTSUP for FAT32 is removed together with the
FAT_IS_FAT12()/FAT_IS_FAT16() macros it relied on:

-       /* Return NOT SUPPORTED if try to mount FAT32 */
-       if (!FAT_IS_FAT12(bs) && !FAT_IS_FAT16(bs)) {
-               block_fini(devmap_handle);
-               async_answer_0(rid, ENOTSUP);
-               return;
-       }
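One detail of fat_write() visible in this file: a write is first clipped to
the current sector (bytes = min(len, BPS(bs) - pos % BPS(bs))), and when the
write position lies beyond the cluster-aligned end of the file, the number of
clusters to append is the difference of two ROUND_UP values divided by the
cluster size. A minimal sketch of that arithmetic with the macros spelled out
(not HelenOS code; fat_clusters_to_append(), its parameters and the local
ROUND_UP stand-in are illustrative):

    #include <stdint.h>

    /* stand-in for the ROUND_UP() macro used by fat_write() */
    #define ROUND_UP(n, m)  ((((n) + (m) - 1) / (m)) * (m))

    /*
     * Sketch: how many whole clusters must be appended so that a write of
     * "bytes" bytes at offset "pos" fits, given the current file size and
     * the bytes-per-cluster value BPC(bs).  Returns 0 when the write still
     * fits under the current cluster-aligned boundary, the case fat_write()
     * handles without allocating.
     */
    static uint64_t
    fat_clusters_to_append(uint64_t size, uint64_t pos, uint64_t bytes,
        uint32_t bpc)
    {
            uint64_t boundary = ROUND_UP(size, bpc);

            if (pos < boundary)
                    return 0;
            return (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
    }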