Changes in / [933cadf:3dbe4ca2] in mainline

- Location: uspace/srv/fs/fat
- Files: 3 edited
- Legend: unmodified context lines carry no marker, added lines are prefixed
  with +, removed lines are prefixed with -.
uspace/srv/fs/fat/fat_fat.c
  /** @addtogroup fs
   * @{
   */

  /**
…
   * primitive boot sector members.
   */
- #define RDS(bs)  ((sizeof(fat_dentry_t) * RDE((bs))) / BPS((bs))) + \
-     (((sizeof(fat_dentry_t) * RDE((bs))) % BPS((bs))) != 0)
- #define SSA(bs)  (RSCNT((bs)) + FATCNT((bs)) * SF((bs)) + RDS(bs))
-
  #define CLBN2PBN(bs, cl, bn) \
      (SSA((bs)) + ((cl) - FAT_CLST_FIRST) * SPC((bs)) + (bn) % SPC((bs)))
…
   * during allocation of clusters. The lock does not have to be held durring
   * deallocation of clusters.
   */
  static FIBRIL_MUTEX_INITIALIZE(fat_alloc_lock);

…
   * @param numc          If non-NULL, output argument holding the number of
   *                      clusters seen during the walk.
   * @param max_clusters  Maximum number of clusters to visit.
   *
   * @return  EOK on success or a negative error code.
   */
  int
  fat_cluster_walk(fat_bs_t *bs, devmap_handle_t devmap_handle, fat_cluster_t firstc,
      fat_cluster_t *lastc, uint16_t *numc, uint16_t max_clusters)
  {
-     block_t *b;
      uint16_t clusters = 0;
-     fat_cluster_t clst = firstc;
+     fat_cluster_t clst = firstc, clst_last1 = FAT_CLST_LAST1(bs);
+     fat_cluster_t clst_bad = FAT_CLST_BAD(bs);
      int rc;
…
      }

-     while (clst < FAT_CLST_LAST1 && clusters < max_clusters) {
-         aoff64_t fsec;  /* sector offset relative to FAT1 */
-         unsigned fidx;  /* FAT1 entry index */
-
+     while (clst < clst_last1 && clusters < max_clusters) {
          assert(clst >= FAT_CLST_FIRST);
          if (lastc)
              *lastc = clst;  /* remember the last cluster number */
-         fsec = (clst * sizeof(fat_cluster_t)) / BPS(bs);
-         fidx = clst % (BPS(bs) / sizeof(fat_cluster_t));
+
          /* read FAT1 */
-         rc = block_get(&b, devmap_handle, RSCNT(bs) + fsec,
-             BLOCK_FLAGS_NONE);
-         if (rc != EOK)
-             return rc;
-         clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
-         assert(clst != FAT_CLST_BAD);
-         rc = block_put(b);
-         if (rc != EOK)
-             return rc;
+         rc = fat_get_cluster(bs, devmap_handle, FAT1, clst, &clst);
+         if (rc != EOK)
+             return rc;
+
+         assert(clst != clst_bad);
          clusters++;
      }

-     if (lastc && clst < FAT_CLST_LAST1)
+     if (lastc && clst < clst_last1)
          *lastc = clst;
      if (numc)
…
          return ELIMIT;

      if (nodep->firstc == FAT_CLST_ROOT)
          goto fall_through;

…
      if (rc != EOK)
          return rc;

      /*
       * Update the "current" cluster cache.
…
   * @param clp    If not NULL, address where the cluster containing bn
   *               will be stored.
   *               stored
   * @param bn     Block number.
   * @param flags  Flags passed to libblock.
…
          return rc;
      }

      if (o >= pos)
          return EOK;

      /* zero out the initial part of the new cluster chain */
      for (o = boundary; o < pos; o += BPS(bs)) {
…
      fat_cluster_t clst, fat_cluster_t *value)
  {
-     block_t *b;
-     fat_cluster_t *cp;
-     int rc;
+     block_t *b, *b1;
+     aoff64_t offset;
+     int rc;
+
+     assert(fatno < FATCNT(bs));
+
+     if (FAT_IS_FAT12(bs))
+         offset = (clst + clst/2);
+     else
+         offset = (clst * sizeof(fat_cluster_t));

      rc = block_get(&b, devmap_handle, RSCNT(bs) + SF(bs) * fatno +
-         (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
+         offset / BPS(bs), BLOCK_FLAGS_NONE);
      if (rc != EOK)
          return rc;
-     cp = (fat_cluster_t *)b->data +
-         clst % (BPS(bs) / sizeof(fat_cluster_t));
-     *value = uint16_t_le2host(*cp);
+
+     /* This cluster access spans a sector boundary. Check only for FAT12 */
+     if (FAT_IS_FAT12(bs) && (offset % BPS(bs) + 1 == BPS(bs))) {
+         /* Is it last sector of FAT? */
+         if (offset / BPS(bs) < SF(bs)) {
+             /* No. Reading next sector */
+             rc = block_get(&b1, devmap_handle, 1 + RSCNT(bs) +
+                 SF(bs)*fatno + offset / BPS(bs), BLOCK_FLAGS_NONE);
+             if (rc != EOK) {
+                 block_put(b);
+                 return rc;
+             }
+             /*
+              * Combining value with last byte of current sector and
+              * first byte of next sector
+              */
+             *value = *(uint8_t *)(b->data + BPS(bs) - 1);
+             *value |= *(uint8_t *)(b1->data);
+
+             rc = block_put(b1);
+             if (rc != EOK) {
+                 block_put(b);
+                 return rc;
+             }
+         }
+         else {
+             /* Yes. It is last sector of FAT */
+             block_put(b);
+             return ERANGE;
+         }
+     }
+     else
+         *value = *(fat_cluster_t *)(b->data + offset % BPS(bs));
+
+     if (FAT_IS_FAT12(bs)) {
+         if (clst & 0x0001)
+             *value = (*value) >> 4;
+         else
+             *value = (*value) & 0x0fff;
+     }
+
+     *value = uint16_t_le2host(*value);
      rc = block_put(b);

      return rc;
  }
…
      fat_cluster_t clst, fat_cluster_t value)
  {
-     block_t *b;
-     fat_cluster_t *cp;
-     int rc;
+     block_t *b, *b1;
+     aoff64_t offset;
+     fat_cluster_t *cp, temp;
+     int rc;
+     int spans = 0;

      assert(fatno < FATCNT(bs));
+
+     if (FAT_IS_FAT12(bs))
+         offset = (clst + clst/2);
+     else
+         offset = (clst * sizeof(fat_cluster_t));
+
      rc = block_get(&b, devmap_handle, RSCNT(bs) + SF(bs) * fatno +
-         (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
+         offset / BPS(bs), BLOCK_FLAGS_NONE);
      if (rc != EOK)
          return rc;
-     cp = (fat_cluster_t *)b->data +
-         clst % (BPS(bs) / sizeof(fat_cluster_t));
-     *cp = host2uint16_t_le(value);
-     b->dirty = true;  /* need to sync block */
+
+     /* This cluster access spans a sector boundary. Check only for FAT12 */
+     if (FAT_IS_FAT12(bs) && (offset % BPS(bs) + 1 == BPS(bs))) {
+         /* Is it last sector of FAT? */
+         if (offset / BPS(bs) < SF(bs)) {
+             /* No. Reading next sector */
+             rc = block_get(&b1, devmap_handle, 1 + RSCNT(bs) +
+                 SF(bs)*fatno + offset / BPS(bs), BLOCK_FLAGS_NONE);
+             if (rc != EOK) {
+                 block_put(b);
+                 return rc;
+             }
+             /*
+              * Combining value with last byte of current sector and
+              * first byte of next sector
+              */
+             spans = 1;
+             cp = &temp;
+             *cp = *(uint8_t *)(b->data + BPS(bs) - 1);
+             *cp |= *(uint8_t *)(b1->data);
+         }
+         else {
+             /* Yes. It is last sector of fat */
+             block_put(b);
+             return ERANGE;
+         }
+     }
+     else
+         cp = (fat_cluster_t *)(b->data + offset % BPS(bs));
+
+     value = host2uint16_t_le(value);
+     if (FAT_IS_FAT12(bs)) {
+         if (clst & 0x0001) {
+             *cp &= 0x000f;
+             *cp |= value << 4;
+         }
+         else {
+             *cp &= 0xf000;
+             *cp |= value & 0x0fff;
+         }
+
+         if (spans) {
+             *(uint8_t *)(b->data + BPS(bs) - 1) = cp[0];
+             *(uint8_t *)(b1->data) = cp[1];
+
+             b1->dirty = true;
+             rc = block_put(b1);
+             if (rc != EOK) {
+                 block_put(b);
+                 return rc;
+             }
+         }
+     }
+     else
+         *cp = value;
+
+     b->dirty = true;  /* need to sync block */
      rc = block_put(b);
      return rc;
…
      uint8_t fatno;
      unsigned c;
+     fat_cluster_t clst_last1 = FAT_CLST_LAST1(bs);
      int rc;

…
      for (c = 0; c < nclsts; c++) {
          rc = fat_set_cluster(bs, devmap_handle, fatno, lifo[c],
-             c == 0 ? FAT_CLST_LAST1 : lifo[c - 1]);
+             c == 0 ? clst_last1 : lifo[c - 1]);
          if (rc != EOK)
              return rc;
…
      fat_cluster_t *mcl, fat_cluster_t *lcl)
  {
-     block_t *blk;
-     fat_cluster_t *lifo;  /* stack for storing free cluster numbers */
-     unsigned found = 0;   /* top of the free cluster number stack */
-     unsigned b, c, cl;
-     int rc;
+     fat_cluster_t *lifo;  /* stack for storing free cluster numbers */
+     unsigned found = 0;   /* top of the free cluster number stack */
+     fat_cluster_t clst, value, clst_last1 = FAT_CLST_LAST1(bs);
+     int rc = EOK;

      lifo = (fat_cluster_t *) malloc(nclsts * sizeof(fat_cluster_t));
      if (!lifo)
          return ENOMEM;
-
      /*
       * Search FAT1 for unused clusters.
       */
      fibril_mutex_lock(&fat_alloc_lock);
-     for (b = 0, cl = 0; b < SF(bs); b++) {
-         rc = block_get(&blk, devmap_handle, RSCNT(bs) + b,
-             BLOCK_FLAGS_NONE);
-         if (rc != EOK)
-             goto error;
-         for (c = 0; c < BPS(bs) / sizeof(fat_cluster_t); c++, cl++) {
-             /*
-              * Check if the entire cluster is physically there.
-              * This check becomes necessary when the file system is
-              * created with fewer total sectors than how many is
-              * inferred from the size of the file allocation table
-              * or when the last cluster ends beyond the end of the
-              * device.
-              */
-             if ((cl >= FAT_CLST_FIRST) &&
-                 CLBN2PBN(bs, cl, SPC(bs) - 1) >= TS(bs)) {
-                 rc = block_put(blk);
-                 if (rc != EOK)
-                     goto error;
-                 goto out;
-             }
-
-             fat_cluster_t *clst = (fat_cluster_t *)blk->data + c;
-             if (uint16_t_le2host(*clst) == FAT_CLST_RES0) {
-                 /*
-                  * The cluster is free. Put it into our stack
-                  * of found clusters and mark it as non-free.
-                  */
-                 lifo[found] = cl;
-                 *clst = (found == 0) ?
-                     host2uint16_t_le(FAT_CLST_LAST1) :
-                     host2uint16_t_le(lifo[found - 1]);
-                 blk->dirty = true;  /* need to sync block */
-                 if (++found == nclsts) {
-                     /* we are almost done */
-                     rc = block_put(blk);
-                     if (rc != EOK)
-                         goto error;
-                     /* update the shadow copies of FAT */
-                     rc = fat_alloc_shadow_clusters(bs,
-                         devmap_handle, lifo, nclsts);
-                     if (rc != EOK)
-                         goto error;
-                     *mcl = lifo[found - 1];
-                     *lcl = lifo[0];
-                     free(lifo);
-                     fibril_mutex_unlock(&fat_alloc_lock);
-                     return EOK;
-                 }
-             }
-         }
-         rc = block_put(blk);
-         if (rc != EOK) {
- error:
+     for (clst = FAT_CLST_FIRST; clst < CC(bs) + 2 && found < nclsts; clst++) {
+         rc = fat_get_cluster(bs, devmap_handle, FAT1, clst, &value);
+         if (rc != EOK)
+             break;
+
+         if (value == FAT_CLST_RES0) {
+             /*
+              * The cluster is free. Put it into our stack
+              * of found clusters and mark it as non-free.
+              */
+             lifo[found] = clst;
+             rc = fat_set_cluster(bs, devmap_handle, FAT1, clst,
+                 (found == 0) ? clst_last1 : lifo[found - 1]);
+             if (rc != EOK)
+                 break;
+
+             found++;
+         }
+     }
+
+     if (rc == EOK && found == nclsts) {
+         rc = fat_alloc_shadow_clusters(bs, devmap_handle, lifo, nclsts);
+         if (rc == EOK) {
+             *mcl = lifo[found - 1];
+             *lcl = lifo[0];
+             free(lifo);
              fibril_mutex_unlock(&fat_alloc_lock);
-             free(lifo);
-             return rc;
-         }
-     }
- out:
-     fibril_mutex_unlock(&fat_alloc_lock);
-
-     /*
-      * We could not find enough clusters. Now we need to free the clusters
-      * we have allocated so far.
-      */
-     while (found--) {
+             return EOK;
+         }
+     }
+
+     /* If something wrong - free the clusters */
+     if (found > 0) {
+         while (found--) {
              rc = fat_set_cluster(bs, devmap_handle, FAT1, lifo[found],
                  FAT_CLST_RES0);
-         if (rc != EOK) {
-             free(lifo);
-             return rc;
-         }
-     }
-
+         }
+     }
+
      free(lifo);
+     fibril_mutex_unlock(&fat_alloc_lock);
      return ENOSPC;
  }
…
  {
      unsigned fatno;
-     fat_cluster_t nextc;
+     fat_cluster_t nextc, clst_bad = FAT_CLST_BAD(bs);
      int rc;

      /* Mark all clusters in the chain as free in all copies of FAT. */
-     while (firstc < FAT_CLST_LAST1) {
-         assert(firstc >= FAT_CLST_FIRST && firstc < FAT_CLST_BAD);
+     while (firstc < FAT_CLST_LAST1(bs)) {
+         assert(firstc >= FAT_CLST_FIRST && firstc < clst_bad);
          rc = fat_get_cluster(bs, devmap_handle, FAT1, firstc, &nextc);
          if (rc != EOK)
…

      for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
-         rc = fat_set_cluster(bs, nodep->idx->devmap_handle, fatno,
-             lastc, mcl);
+         rc = fat_set_cluster(bs, nodep->idx->devmap_handle,
+             fatno, lastc, mcl);
          if (rc != EOK)
              return rc;
…
  int fat_chop_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t lcl)
  {
+     fat_cluster_t clst_last1 = FAT_CLST_LAST1(bs);
      int rc;
      devmap_handle_t devmap_handle = nodep->idx->devmap_handle;
…
      for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
          rc = fat_set_cluster(bs, devmap_handle, fatno, lcl,
-             FAT_CLST_LAST1);
+             clst_last1);
          if (rc != EOK)
              return rc;
…

      /* Check total number of sectors. */
-
      if (bs->totsec16 == 0 && bs->totsec32 == 0)
          return ENOTSUP;

      if (bs->totsec16 != 0 && bs->totsec32 != 0 &&
          bs->totsec16 != bs->totsec32)
          return ENOTSUP;
…

      /* Check signature of each FAT. */
-
      for (fat_no = 0; fat_no < bs->fatcnt; fat_no++) {
          rc = fat_get_cluster(bs, devmap_handle, fat_no, 0, &e0);
…
       * set to one.
       */
-     if ((e0 >> 8) != 0xff || e1 != 0xffff)
+     if (!FAT_IS_FAT12(bs) && ((e0 >> 8) != 0xff || e1 != 0xffff))
          return ENOTSUP;
      }
…
  /**
   * @}
   */
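The FAT12-specific branches in fat_get_cluster() and fat_set_cluster() above follow from how FAT12 packs its entries: each entry is 12 bits wide, so the entry for cluster clst starts at byte offset clst + clst/2 into the FAT, an even-numbered cluster occupies the low 12 bits of the little-endian 16-bit pair at that offset, an odd-numbered cluster the high 12 bits, and an entry whose offset lands on the last byte of a sector spills into the next sector, which is why those functions may have to fetch a second block. As a minimal sketch of the same unpacking, assuming the FAT is already loaded into one contiguous buffer (fat12_get() and fat_buf are hypothetical names for illustration only, not part of the HelenOS sources):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: read one 12-bit FAT12 entry from an in-memory FAT. */
static uint16_t fat12_get(const uint8_t *fat_buf, uint16_t clst)
{
    size_t offset = clst + clst / 2;             /* each entry is 1.5 bytes */
    uint16_t pair = (uint16_t) fat_buf[offset] |
        ((uint16_t) fat_buf[offset + 1] << 8);   /* little-endian 16-bit pair */

    if (clst & 0x0001)
        return pair >> 4;                        /* odd cluster: upper 12 bits */
    else
        return pair & 0x0fff;                    /* even cluster: lower 12 bits */
}

Writing an entry is the mirror image: the 4 bits that belong to the neighbouring entry must be preserved, which is what the *cp &= 0x000f and *cp &= 0xf000 masking in fat_set_cluster() is for.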
uspace/srv/fs/fat/fat_fat.h
  /** @addtogroup fs
   * @{
   */

  #ifndef FAT_FAT_FAT_H_
…
  #define FAT1  0

  #define FAT_CLST_RES0   0x0000
  #define FAT_CLST_RES1   0x0001
  #define FAT_CLST_FIRST  0x0002
- #define FAT_CLST_BAD    0xfff7
- #define FAT_CLST_LAST1  0xfff8
- #define FAT_CLST_LAST8  0xffff
+
+ #define FAT12_CLST_BAD    0x0ff7
+ #define FAT12_CLST_LAST1  0x0ff8
+ #define FAT12_CLST_LAST8  0x0fff
+ #define FAT16_CLST_BAD    0xfff7
+ #define FAT16_CLST_LAST1  0xfff8
+ #define FAT16_CLST_LAST8  0xffff
+
+ #define FAT12_CLST_MAX  4085
+ #define FAT16_CLST_MAX  65525

  /* internally used to mark root directory's parent */
…
  /* internally used to mark root directory */
  #define FAT_CLST_ROOT  FAT_CLST_RES1
+
+ /*
+  * Convenience macros for computing some frequently used values from the
+  * primitive boot sector members.
+  */
+ #define RDS(bs)  ((sizeof(fat_dentry_t) * RDE((bs))) / BPS((bs))) + \
+     (((sizeof(fat_dentry_t) * RDE((bs))) % BPS((bs))) != 0)
+ #define SSA(bs)  (RSCNT((bs)) + FATCNT((bs)) * SF((bs)) + RDS(bs))
+ #define DS(bs)   (TS(bs) - SSA(bs))
+ #define CC(bs)   (DS(bs) / SPC(bs))
+
+ #define FAT_IS_FAT12(bs)  (CC(bs) < FAT12_CLST_MAX)
+ #define FAT_IS_FAT16(bs) \
+     ((CC(bs) >= FAT12_CLST_MAX) && (CC(bs) < FAT16_CLST_MAX))
+
+ #define FAT_CLST_LAST1(bs) \
+     (FAT_IS_FAT12(bs) ? FAT12_CLST_LAST1 : FAT16_CLST_LAST1)
+ #define FAT_CLST_LAST8(bs) \
+     (FAT_IS_FAT12(bs) ? FAT12_CLST_LAST8 : FAT16_CLST_LAST8)
+ #define FAT_CLST_BAD(bs) \
+     (FAT_IS_FAT12(bs) ? FAT12_CLST_BAD : FAT16_CLST_BAD)

  /* forward declarations */
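The new DS(), CC(), FAT_IS_FAT12() and FAT_IS_FAT16() macros encode the standard rule that the FAT variant is determined solely by the number of data clusters: fewer than 4085 clusters means FAT12, fewer than 65525 means FAT16, anything larger is FAT32. A minimal standalone sketch of the same computation, assuming plain geometry fields instead of the fat_bs_t accessors (the struct and function names below are hypothetical, not HelenOS API):

#include <stdint.h>

enum fat_type { FAT12, FAT16, FAT32 };

struct fat_geometry {
    uint32_t total_sectors;        /* TS   */
    uint32_t reserved_sectors;     /* RSCNT */
    uint32_t fat_count;            /* FATCNT */
    uint32_t sectors_per_fat;      /* SF   */
    uint32_t root_dir_sectors;     /* RDS  */
    uint32_t sectors_per_cluster;  /* SPC  */
};

static enum fat_type fat_type_of(const struct fat_geometry *g)
{
    /* system area: reserved sectors, all FAT copies, root directory */
    uint32_t ssa = g->reserved_sectors +
        g->fat_count * g->sectors_per_fat + g->root_dir_sectors;
    /* number of data clusters behind the system area */
    uint32_t cc = (g->total_sectors - ssa) / g->sectors_per_cluster;

    if (cc < 4085)      /* FAT12_CLST_MAX */
        return FAT12;
    if (cc < 65525)     /* FAT16_CLST_MAX */
        return FAT16;
    return FAT32;
}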
uspace/srv/fs/fat/fat_ops.c
  /** @addtogroup fs
   * @{
   */

  /**
…
      node->dirty = false;
      node->lastc_cached_valid = false;
-     node->lastc_cached_value = FAT_CLST_LAST1;
+     node->lastc_cached_value = FAT16_CLST_LAST1;
      node->currc_cached_valid = false;
      node->currc_cached_bn = 0;
-     node->currc_cached_value = FAT_CLST_LAST1;
+     node->currc_cached_value = FAT16_CLST_LAST1;
  }
…
      fat_dentry_t *d;
      int rc;

      assert(node->dirty);

      bs = block_bb_get(node->idx->devmap_handle);

      /* Read the block that contains the dentry of interest. */
      rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
…
          d->attr = FAT_ATTR_SUBDIR;
      }

      /* TODO: update other fields? (e.g time fields) */

      b->dirty = true;  /* need to sync block */
      rc = block_put(b);
…
      fn->data = nodep;
      nodep->bp = fn;

      *nodepp = nodep;
      return EOK;
…
       * We must instantiate the node from the file system.
       */

      assert(idxp->pfc);
…
      d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
      if (d->attr & FAT_ATTR_SUBDIR) {
          /*
           * The only directory which does not have this bit set is the
           * root directory itself. The root directory node is handled
…
           */
          nodep->type = FAT_DIRECTORY;
+
          /*
           * Unfortunately, the 'size' field of the FAT dentry is not
           * defined for the directory entry type. We must determine the
…
          nodep->size = uint32_t_le2host(d->size);
      }
+
      nodep->firstc = uint16_t_le2host(d->firstc);
      nodep->lnkcnt = 1;
      nodep->refcnt = 1;
…
      if (rc != EOK)
          return rc;
      for (j = 0; j < DPS(bs); j++) {
          d = ((fat_dentry_t *)b->data) + j;
          switch (fat_classify_dentry(d)) {
…
      rc = fat_idx_get_new(&idxp, devmap_handle);
      if (rc != EOK) {
          (void) fat_free_clusters(bs, devmap_handle, mcl);
          (void) fat_node_put(FS_NODE(nodep));
          return rc;
…
       * a new one.
       */

      fibril_mutex_lock(&parentp->idx->lock);
      bs = block_bb_get(parentp->idx->devmap_handle);
…
      }
      j = 0;

      /*
       * We need to grow the parent in order to create a new unused dentry.
…
      rc = block_put(b);
      fibril_mutex_unlock(&parentp->idx->lock);
      if (rc != EOK)
          return rc;

      fibril_mutex_lock(&childp->idx->lock);

      if (childp->type == FAT_DIRECTORY) {
          /*
…
      if (!parentp)
          return EBUSY;

      rc = fat_has_children(&has_children, cfn);
      if (rc != EOK)
…
          NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
          BLOCK_FLAGS_NONE);
      if (rc != EOK)
          goto error;
      d = (fat_dentry_t *)b->data +
…
          return EOK;
      }

      fibril_mutex_lock(&nodep->idx->lock);
      bs = block_bb_get(nodep->idx->devmap_handle);
…
      for (i = 0; i < blocks; i++) {
          fat_dentry_t *d;

          rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
          if (rc != EOK) {
…
          if (rc != EOK) {
              fibril_mutex_unlock(&nodep->idx->lock);
              return rc;
          }
      }
…
      enum cache_mode cmode;
      fat_bs_t *bs;

      /* Accept the mount options */
      char *opts;
      int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);

      if (rc != EOK) {
          async_answer_0(rid, rc);
…
      /* get the buffer with the boot sector */
      bs = block_bb_get(devmap_handle);

      if (BPS(bs) != BS_SIZE) {
          block_fini(devmap_handle);
…
          block_fini(devmap_handle);
          async_answer_0(rid, rc);
+         return;
+     }
+
+     /* Return NOT SUPPORTED if try to mount FAT32 */
+     if (!FAT_IS_FAT12(bs) && !FAT_IS_FAT16(bs)) {
+         block_fini(devmap_handle);
+         async_answer_0(rid, ENOTSUP);
          return;
      }
…
      rootp->bp = rfn;
      rfn->data = rootp;

      fibril_mutex_unlock(&ridxp->lock);

…
          return;
      }

      /*
       * Put the root node and force it to the FAT free node list.
…
      int flags = BLOCK_FLAGS_NONE;
      int rc;

      rc = fat_node_get(&fn, devmap_handle, index);
      if (rc != EOK) {
…
      }
      nodep = FAT_NODE(fn);

      ipc_callid_t callid;
      size_t len;
…
       * but this one greatly simplifies fat_write(). Note that we can afford
       * to do this because the client must be ready to handle the return
       * value signalizing a smaller number of bytes written.
       */
      bytes = min(len, BPS(bs) - pos % BPS(bs));
      if (bytes == BPS(bs))
          flags |= BLOCK_FLAGS_NOREAD;

      boundary = ROUND_UP(nodep->size, BPC(bs));
      if (pos < boundary) {
…
       */
      unsigned nclsts;
      fat_cluster_t mcl, lcl;

      nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
      /* create an independent chain of nclsts clusters in all FATs */
…
          nodep->size = size;
          nodep->dirty = true;  /* need to sync node */
          rc = EOK;
      } else {
          /*
…
          nodep->size = size;
          nodep->dirty = true;  /* need to sync node */
          rc = EOK;
      }
  out:
…
      devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
      fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);

      fs_node_t *fn;
      int rc = fat_node_get(&fn, devmap_handle, index);
…
          return;
      }

      fat_node_t *nodep = FAT_NODE(fn);

      nodep->dirty = true;
      rc = fat_node_sync(nodep);

      fat_node_put(fn);
      async_answer_0(rid, rc);
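The new guard in fat_mounted() rejects any volume that is neither FAT12 nor FAT16, i.e. FAT32, with ENOTSUP. Because FAT_IS_FAT12() and FAT_IS_FAT16() are both defined in terms of the data-cluster count, the check is equivalent to asking whether that count stays below FAT16_CLST_MAX. A minimal sketch of that equivalence, assuming the cluster count has already been computed (fat_check_supported() is a hypothetical helper, not HelenOS API):

#include <errno.h>
#include <stdint.h>

#define FAT12_CLST_MAX  4085
#define FAT16_CLST_MAX  65525
#ifndef EOK
#define EOK 0
#endif

/* Hypothetical helper mirroring the mount-time check on the boot sector. */
static int fat_check_supported(uint32_t data_cluster_count)
{
    if (data_cluster_count < FAT16_CLST_MAX)
        return EOK;      /* FAT12 (< 4085 clusters) or FAT16 (< 65525) */
    return ENOTSUP;      /* FAT32: refused by this server */
}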