Changeset 9a1d8ab in mainline
- Timestamp: 2010-07-28T15:27:13Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0b749a3, 482dde7, c0e1be7
- Parents: 14f2100 (diff), dba4a23 (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: uspace/srv/fs/fat
- Files: 4 edited
Legend:
- Unmodified lines are prefixed with a space
- Added lines are prefixed with +
- Removed lines are prefixed with -
uspace/srv/fs/fat/fat.h
(from r14f2100 to r9a1d8ab)

 #define min(a, b)  ((a) < (b) ? (a) : (b))

+/*
+ * Convenience macros for accessing some frequently used boot sector members.
+ */
+#define BPS(bs)     uint16_t_le2host((bs)->bps)
+#define SPC(bs)     (bs)->spc
+#define RSCNT(bs)   uint16_t_le2host((bs)->rscnt)
+#define FATCNT(bs)  (bs)->fatcnt
+#define SF(bs)      uint16_t_le2host((bs)->sec_per_fat)
+#define RDE(bs)     uint16_t_le2host((bs)->root_ent_max)
+#define TS(bs)      (uint16_t_le2host((bs)->totsec16) != 0 ? \
+    uint16_t_le2host((bs)->totsec16) : \
+    uint32_t_le2host(bs->totsec32))

 #define BS_BLOCK    0
…
     unsigned refcnt;
     bool dirty;
+
+    /*
+     * Cache of the node's last and "current" cluster to avoid some
+     * unnecessary FAT walks.
+     */
+    /* Node's last cluster in FAT. */
+    bool lastc_cached_valid;
+    fat_cluster_t lastc_cached_value;
+    /* Node's "current" cluster, i.e. where the last I/O took place. */
+    bool currc_cached_valid;
+    aoff64_t currc_cached_bn;
+    fat_cluster_t currc_cached_value;
 } fat_node_t;
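The new accessors only wrap endian conversion of raw boot-sector fields. The stand-alone sketch below is not part of the changeset: the struct, the demo_ values and the sample geometry are invented for illustration, and host-order fields stand in for the on-disk little-endian ones that the real macros convert with uint16_t_le2host()/uint32_t_le2host(). It shows the selection TS(bs) performs between the 16-bit and 32-bit total-sector counts.

#include <stdint.h>
#include <stdio.h>

/* Host-order stand-in for fat_bs_t (illustrative only). */
typedef struct {
    uint16_t bps;          /* bytes per sector */
    uint8_t  spc;          /* sectors per cluster */
    uint16_t rscnt;        /* reserved sector count */
    uint8_t  fatcnt;       /* number of FAT copies */
    uint16_t sec_per_fat;
    uint16_t root_ent_max;
    uint16_t totsec16;     /* total sectors if they fit in 16 bits, else 0 */
    uint32_t totsec32;     /* total sectors for larger volumes */
} demo_bs_t;

/* Same selection logic as the new TS(bs) macro. */
static uint32_t demo_ts(const demo_bs_t *bs)
{
    return bs->totsec16 != 0 ? bs->totsec16 : bs->totsec32;
}

int main(void)
{
    demo_bs_t bs = { .bps = 512, .spc = 4, .rscnt = 1, .fatcnt = 2,
        .sec_per_fat = 9, .root_ent_max = 224, .totsec16 = 0,
        .totsec32 = 131072 };

    /* With totsec16 == 0, the 32-bit count is authoritative. */
    printf("total sectors:     %u\n", (unsigned) demo_ts(&bs));
    printf("bytes per cluster: %u\n", (unsigned) (bs.bps * bs.spc));
    return 0;
}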
uspace/srv/fs/fat/fat_fat.c
(from r14f2100 to r9a1d8ab)

 #include <mem.h>

+/*
+ * Convenience macros for computing some frequently used values from the
+ * primitive boot sector members.
+ */
+#define RDS(bs)   ((sizeof(fat_dentry_t) * RDE((bs))) / BPS((bs))) + \
+    (((sizeof(fat_dentry_t) * RDE((bs))) % BPS((bs))) != 0)
+#define SSA(bs)   (RSCNT((bs)) + FATCNT((bs)) * SF((bs)) + RDS(bs))
+
+#define CLBN2PBN(bs, cl, bn) \
+    (SSA((bs)) + ((cl) - FAT_CLST_FIRST) * SPC((bs)) + (bn) % SPC((bs)))
+
 /**
  * The fat_alloc_lock mutex protects all copies of the File Allocation Table
…
 {
     block_t *b;
-    unsigned bps;
-    unsigned rscnt;        /* block address of the first FAT */
     uint16_t clusters = 0;
     fat_cluster_t clst = firstc;
     int rc;
-
-    bps = uint16_t_le2host(bs->bps);
-    rscnt = uint16_t_le2host(bs->rscnt);

     if (firstc == FAT_CLST_RES0) {
…
         if (lastc)
             *lastc = clst;    /* remember the last cluster number */
-        fsec = (clst * sizeof(fat_cluster_t)) / bps;
-        fidx = clst % (bps / sizeof(fat_cluster_t));
+        fsec = (clst * sizeof(fat_cluster_t)) / BPS(bs);
+        fidx = clst % (BPS(bs) / sizeof(fat_cluster_t));
         /* read FAT1 */
-        rc = block_get(&b, dev_handle, rscnt + fsec, BLOCK_FLAGS_NONE);
+        rc = block_get(&b, dev_handle, RSCNT(bs) + fsec,
+            BLOCK_FLAGS_NONE);
         if (rc != EOK)
             return rc;
…
  * @param block      Pointer to a block pointer for storing result.
  * @param bs         Buffer holding the boot sector of the file system.
- * @param dev_handle Device handle of the file system.
- * @param firstc     First cluster used by the file. Can be zero if the file
- *                   is empty.
+ * @param nodep      FAT node.
  * @param bn         Block number.
  * @param flags      Flags passed to libblock.
  *
  * @return           EOK on success or a negative error code.
  */
 int
+fat_block_get(block_t **block, struct fat_bs *bs, fat_node_t *nodep,
+    aoff64_t bn, int flags)
+{
+    fat_cluster_t firstc = nodep->firstc;
+    fat_cluster_t currc;
+    aoff64_t relbn = bn;
+    int rc;
+
+    if (!nodep->size)
+        return ELIMIT;
+
+    if (nodep->firstc == FAT_CLST_ROOT)
+        goto fall_through;
+
+    if (((((nodep->size - 1) / BPS(bs)) / SPC(bs)) == bn / SPC(bs)) &&
+        nodep->lastc_cached_valid) {
+        /*
+         * This is a request to read a block within the last cluster
+         * when fortunately we have the last cluster number cached.
+         */
+        return block_get(block, nodep->idx->dev_handle,
+            CLBN2PBN(bs, nodep->lastc_cached_value, bn), flags);
+    }
+
+    if (nodep->currc_cached_valid && bn >= nodep->currc_cached_bn) {
+        /*
+         * We can start with the cluster cached by the previous call to
+         * fat_block_get().
+         */
+        firstc = nodep->currc_cached_value;
+        relbn -= (nodep->currc_cached_bn / SPC(bs)) * SPC(bs);
+    }
+
+fall_through:
+    rc = _fat_block_get(block, bs, nodep->idx->dev_handle, firstc,
+        &currc, relbn, flags);
+    if (rc != EOK)
+        return rc;
+
+    /*
+     * Update the "current" cluster cache.
+     */
+    nodep->currc_cached_valid = true;
+    nodep->currc_cached_bn = bn;
+    nodep->currc_cached_value = currc;
+
+    return rc;
+}
+
+/** Read block from file located on a FAT file system.
+ *
+ * @param block      Pointer to a block pointer for storing result.
+ * @param bs         Buffer holding the boot sector of the file system.
+ * @param dev_handle Device handle of the file system.
+ * @param fcl        First cluster used by the file. Can be zero if the file
+ *                   is empty.
+ * @param clp        If not NULL, address where the cluster containing bn
+ *                   will be stored.
+ * @param bn         Block number.
+ * @param flags      Flags passed to libblock.
+ *
+ * @return           EOK on success or a negative error code.
+ */
+int
 _fat_block_get(block_t **block, fat_bs_t *bs, dev_handle_t dev_handle,
-    fat_cluster_t firstc, aoff64_t bn, int flags)
-{
-    unsigned bps;
-    unsigned rscnt;        /* block address of the first FAT */
-    unsigned rde;
-    unsigned rds;        /* root directory size */
-    unsigned sf;
-    unsigned ssa;        /* size of the system area */
+    fat_cluster_t fcl, fat_cluster_t *clp, aoff64_t bn, int flags)
+{
     uint16_t clusters;
     unsigned max_clusters;
-    fat_cluster_t lastc;
+    fat_cluster_t c;
     int rc;
…
      * This function can only operate on non-zero length files.
      */
-    if (firstc == FAT_CLST_RES0)
+    if (fcl == FAT_CLST_RES0)
         return ELIMIT;

-    bps = uint16_t_le2host(bs->bps);
-    rscnt = uint16_t_le2host(bs->rscnt);
-    rde = uint16_t_le2host(bs->root_ent_max);
-    sf = uint16_t_le2host(bs->sec_per_fat);
-
-    rds = (sizeof(fat_dentry_t) * rde) / bps;
-    rds += ((sizeof(fat_dentry_t) * rde) % bps != 0);
-    ssa = rscnt + bs->fatcnt * sf + rds;
-
-    if (firstc == FAT_CLST_ROOT) {
+    if (fcl == FAT_CLST_ROOT) {
         /* root directory special case */
-        assert(bn < rds);
-        rc = block_get(block, dev_handle, rscnt + bs->fatcnt * sf + bn,
-            flags);
+        assert(bn < RDS(bs));
+        rc = block_get(block, dev_handle,
+            RSCNT(bs) + FATCNT(bs) * SF(bs) + bn, flags);
         return rc;
     }

-    max_clusters = bn / bs->spc;
-    rc = fat_cluster_walk(bs, dev_handle, firstc, &lastc, &clusters,
-        max_clusters);
+    max_clusters = bn / SPC(bs);
+    rc = fat_cluster_walk(bs, dev_handle, fcl, &c, &clusters, max_clusters);
     if (rc != EOK)
         return rc;
     assert(clusters == max_clusters);

-    rc = block_get(block, dev_handle,
-        ssa + (lastc - FAT_CLST_FIRST) * bs->spc + bn % bs->spc, flags);
+    rc = block_get(block, dev_handle, CLBN2PBN(bs, c, bn), flags);
+
+    if (clp)
+        *clp = c;

     return rc;
…
 int fat_fill_gap(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl, aoff64_t pos)
 {
-    uint16_t bps;
-    unsigned spc;
     block_t *b;
     aoff64_t o, boundary;
     int rc;

-    bps = uint16_t_le2host(bs->bps);
-    spc = bs->spc;
-
-    boundary = ROUND_UP(nodep->size, bps * spc);
+    boundary = ROUND_UP(nodep->size, BPS(bs) * SPC(bs));

     /* zero out already allocated space */
     for (o = nodep->size; o < pos && o < boundary;
-        o = ALIGN_DOWN(o + bps, bps)) {
-        int flags = (o % bps == 0) ?
+        o = ALIGN_DOWN(o + BPS(bs), BPS(bs))) {
+        int flags = (o % BPS(bs) == 0) ?
             BLOCK_FLAGS_NOREAD : BLOCK_FLAGS_NONE;
-        rc = fat_block_get(&b, bs, nodep, o / bps, flags);
+        rc = fat_block_get(&b, bs, nodep, o / BPS(bs), flags);
         if (rc != EOK)
             return rc;
-        memset(b->data + o % bps, 0, bps - o % bps);
+        memset(b->data + o % BPS(bs), 0, BPS(bs) - o % BPS(bs));
         b->dirty = true;        /* need to sync node */
         rc = block_put(b);
…
     /* zero out the initial part of the new cluster chain */
-    for (o = boundary; o < pos; o += bps) {
+    for (o = boundary; o < pos; o += BPS(bs)) {
         rc = _fat_block_get(&b, bs, nodep->idx->dev_handle, mcl,
-            (o - boundary) / bps, BLOCK_FLAGS_NOREAD);
+            NULL, (o - boundary) / BPS(bs), BLOCK_FLAGS_NOREAD);
         if (rc != EOK)
             return rc;
-        memset(b->data, 0, min(bps, pos - o));
+        memset(b->data, 0, min(BPS(bs), pos - o));
         b->dirty = true;        /* need to sync node */
         rc = block_put(b);
…
 {
     block_t *b;
-    uint16_t bps;
-    uint16_t rscnt;
-    uint16_t sf;
     fat_cluster_t *cp;
     int rc;

-    bps = uint16_t_le2host(bs->bps);
-    rscnt = uint16_t_le2host(bs->rscnt);
-    sf = uint16_t_le2host(bs->sec_per_fat);
-
-    rc = block_get(&b, dev_handle, rscnt + sf * fatno +
-        (clst * sizeof(fat_cluster_t)) / bps, BLOCK_FLAGS_NONE);
+    rc = block_get(&b, dev_handle, RSCNT(bs) + SF(bs) * fatno +
+        (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
     if (rc != EOK)
         return rc;
-    cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t));
+    cp = (fat_cluster_t *)b->data +
+        clst % (BPS(bs) / sizeof(fat_cluster_t));
     *value = uint16_t_le2host(*cp);
     rc = block_put(b);
…
 {
     block_t *b;
-    uint16_t bps;
-    uint16_t rscnt;
-    uint16_t sf;
     fat_cluster_t *cp;
     int rc;

-    bps = uint16_t_le2host(bs->bps);
-    rscnt = uint16_t_le2host(bs->rscnt);
-    sf = uint16_t_le2host(bs->sec_per_fat);
-
-    assert(fatno < bs->fatcnt);
-    rc = block_get(&b, dev_handle, rscnt + sf * fatno +
-        (clst * sizeof(fat_cluster_t)) / bps, BLOCK_FLAGS_NONE);
+    assert(fatno < FATCNT(bs));
+    rc = block_get(&b, dev_handle, RSCNT(bs) + SF(bs) * fatno +
+        (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
     if (rc != EOK)
         return rc;
-    cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t));
+    cp = (fat_cluster_t *)b->data +
+        clst % (BPS(bs) / sizeof(fat_cluster_t));
     *cp = host2uint16_t_le(value);
     b->dirty = true;    /* need to sync block */
…
     fat_cluster_t *mcl, fat_cluster_t *lcl)
 {
-    uint16_t bps;
-    uint16_t rscnt;
-    uint16_t sf;
-    uint32_t ts;
-    unsigned rde;
-    unsigned rds;
-    unsigned ssa;
     block_t *blk;
     fat_cluster_t *lifo;    /* stack for storing free cluster numbers */
…
     if (!lifo)
         return ENOMEM;
-
-    bps = uint16_t_le2host(bs->bps);
-    rscnt = uint16_t_le2host(bs->rscnt);
-    sf = uint16_t_le2host(bs->sec_per_fat);
-    rde = uint16_t_le2host(bs->root_ent_max);
-    ts = (uint32_t) uint16_t_le2host(bs->totsec16);
-    if (ts == 0)
-        ts = uint32_t_le2host(bs->totsec32);
-
-    rds = (sizeof(fat_dentry_t) * rde) / bps;
-    rds += ((sizeof(fat_dentry_t) * rde) % bps != 0);
-    ssa = rscnt + bs->fatcnt * sf + rds;

     /*
…
     fibril_mutex_lock(&fat_alloc_lock);
-    for (b = 0, cl = 0; b < sf; b++) {
-        rc = block_get(&blk, dev_handle, rscnt + b, BLOCK_FLAGS_NONE);
+    for (b = 0, cl = 0; b < SF(bs); b++) {
+        rc = block_get(&blk, dev_handle, RSCNT(bs) + b,
+            BLOCK_FLAGS_NONE);
         if (rc != EOK)
             goto error;
-        for (c = 0; c < bps / sizeof(fat_cluster_t); c++, cl++) {
+        for (c = 0; c < BPS(bs) / sizeof(fat_cluster_t); c++, cl++) {
             /*
              * Check if the cluster is physically there. This check
…
              * from the size of the file allocation table.
              */
-            if ((cl >= 2) && ((cl - 2) * bs->spc + ssa >= ts)) {
+            if ((cl >= 2) &&
+                ((cl - 2) * SPC(bs) + SSA(bs) >= TS(bs))) {
                 rc = block_put(blk);
                 if (rc != EOK)
…
  * @param nodep      Node representing the file.
  * @param mcl        First cluster of the cluster chain to append.
+ * @param lcl        Last cluster of the cluster chain to append.
  *
  * @return           EOK on success or a negative error code.
  */
-int fat_append_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl)
+int
+fat_append_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl,
+    fat_cluster_t lcl)
 {
     dev_handle_t dev_handle = nodep->idx->dev_handle;
-    fat_cluster_t lcl;
+    fat_cluster_t lastc;
     uint16_t numc;
     uint8_t fatno;
     int rc;

-    rc = fat_cluster_walk(bs, dev_handle, nodep->firstc, &lcl, &numc,
-        (uint16_t) -1);
-    if (rc != EOK)
-        return rc;
-
-    if (numc == 0) {
-        /* No clusters allocated to the node yet. */
-        nodep->firstc = mcl;
-        nodep->dirty = true;    /* need to sync node */
-        return EOK;
+    if (nodep->lastc_cached_valid) {
+        lastc = nodep->lastc_cached_value;
+        nodep->lastc_cached_valid = false;
+    } else {
+        rc = fat_cluster_walk(bs, dev_handle, nodep->firstc, &lastc,
+            &numc, (uint16_t) -1);
+        if (rc != EOK)
+            return rc;
+
+        if (numc == 0) {
+            /* No clusters allocated to the node yet. */
+            nodep->firstc = mcl;
+            nodep->dirty = true;    /* need to sync node */
+            return EOK;
+        }
     }

     for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
-        rc = fat_set_cluster(bs, nodep->idx->dev_handle, fatno, lcl,
+        rc = fat_set_cluster(bs, nodep->idx->dev_handle, fatno, lastc,
             mcl);
         if (rc != EOK)
             return rc;
     }
+
+    nodep->lastc_cached_valid = true;
+    nodep->lastc_cached_value = lcl;

     return EOK;
…
  * @param bs         Buffer holding the boot sector of the file system.
  * @param nodep      FAT node where the chopping will take place.
- * @param lastc      Last cluster which will remain in the node. If this
+ * @param lcl        Last cluster which will remain in the node. If this
  *                   argument is FAT_CLST_RES0, then all clusters will
  *                   be chopped off.
  *
  * @return           EOK on success or a negative return code.
  */
-int fat_chop_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t lastc)
-{
-    int rc;
-
+int fat_chop_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t lcl)
+{
+    int rc;
     dev_handle_t dev_handle = nodep->idx->dev_handle;
-    if (lastc == FAT_CLST_RES0) {
+
+    /*
+     * Invalidate cached cluster numbers.
+     */
+    nodep->lastc_cached_valid = false;
+    if (nodep->currc_cached_value != lcl)
+        nodep->currc_cached_valid = false;
+
+    if (lcl == FAT_CLST_RES0) {
         /* The node will have zero size and no clusters allocated. */
         rc = fat_free_clusters(bs, dev_handle, nodep->firstc);
…
         unsigned fatno;

-        rc = fat_get_cluster(bs, dev_handle, FAT1, lastc, &nextc);
+        rc = fat_get_cluster(bs, dev_handle, FAT1, lcl, &nextc);
         if (rc != EOK)
             return rc;

         /* Terminate the cluster chain in all copies of FAT. */
         for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
-            rc = fat_set_cluster(bs, dev_handle, fatno, lastc,
+            rc = fat_set_cluster(bs, dev_handle, fatno, lcl,
                 FAT_CLST_LAST1);
             if (rc != EOK)
…
     }

+    /*
+     * Update and re-enable the last cluster cache.
+     */
+    nodep->lastc_cached_valid = true;
+    nodep->lastc_cached_value = lcl;
+
     return EOK;
 }
…
     int i;
     block_t *b;
-    unsigned bps;
-    int rc;
-
-    bps = uint16_t_le2host(bs->bps);
-
-    for (i = 0; i < bs->spc; i++) {
-        rc = _fat_block_get(&b, bs, dev_handle, c, i,
+    int rc;
+
+    for (i = 0; i < SPC(bs); i++) {
+        rc = _fat_block_get(&b, bs, dev_handle, c, NULL, i,
             BLOCK_FLAGS_NOREAD);
         if (rc != EOK)
             return rc;
-        memset(b->data, 0, bps);
+        memset(b->data, 0, BPS(bs));
         b->dirty = true;
         rc = block_put(b);
uspace/srv/fs/fat/fat_fat.h
(from r14f2100 to r9a1d8ab)

     fat_cluster_t *, uint16_t *, uint16_t);

-#define fat_block_get(b, bs, np, bn, flags) \
-    _fat_block_get((b), (bs), (np)->idx->dev_handle, (np)->firstc, (bn), \
-    (flags))
-
+extern int fat_block_get(block_t **, struct fat_bs *, struct fat_node *,
+    aoff64_t, int);
 extern int _fat_block_get(block_t **, struct fat_bs *, dev_handle_t,
-    fat_cluster_t, aoff64_t, int);
+    fat_cluster_t, fat_cluster_t *, aoff64_t, int);

 extern int fat_append_clusters(struct fat_bs *, struct fat_node *,
-    fat_cluster_t);
+    fat_cluster_t, fat_cluster_t);
 extern int fat_chop_clusters(struct fat_bs *, struct fat_node *,
     fat_cluster_t);
uspace/srv/fs/fat/fat_ops.c
(from r14f2100 to r9a1d8ab)

 #define FS_NODE(node)  ((node) ? (node)->bp : NULL)

+#define DPS(bs)     (BPS((bs)) / sizeof(fat_dentry_t))
+#define BPC(bs)     (BPS((bs)) * SPC((bs)))
+
 /** Mutex protecting the list of cached free FAT nodes. */
 static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
…
     node->refcnt = 0;
     node->dirty = false;
+    node->lastc_cached_valid = false;
+    node->lastc_cached_value = FAT_CLST_LAST1;
+    node->currc_cached_valid = false;
+    node->currc_cached_bn = 0;
+    node->currc_cached_value = FAT_CLST_LAST1;
 }
…
     fat_bs_t *bs;
     fat_dentry_t *d;
-    uint16_t bps;
-    unsigned dps;
     int rc;
…
     bs = block_bb_get(node->idx->dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    dps = bps / sizeof(fat_dentry_t);

     /* Read the block that contains the dentry of interest. */
     rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
-        (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
+        NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
+        BLOCK_FLAGS_NONE);
     if (rc != EOK)
         return rc;

-    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
+    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));

     d->firstc = host2uint16_t_le(node->firstc);
…
     fat_dentry_t *d;
     fat_node_t *nodep = NULL;
-    unsigned bps;
-    unsigned spc;
-    unsigned dps;
     int rc;
…
     bs = block_bb_get(idxp->dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    spc = bs->spc;
-    dps = bps / sizeof(fat_dentry_t);

     /* Read the block that contains the dentry of interest. */
-    rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
-        (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
+    rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc, NULL,
+        (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
     if (rc != EOK) {
         (void) fat_node_put(FS_NODE(nodep));
…
     }

-    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
+    d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
     if (d->attr & FAT_ATTR_SUBDIR) {
         /*
…
             return rc;
         }
-        nodep->size = bps * spc * clusters;
+        nodep->size = BPS(bs) * SPC(bs) * clusters;
     } else {
         nodep->type = FAT_FILE;
…
     char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
     unsigned i, j;
-    unsigned bps;        /* bytes per sector */
-    unsigned dps;        /* dentries per sector */
     unsigned blocks;
     fat_dentry_t *d;
…
     fibril_mutex_lock(&parentp->idx->lock);
     bs = block_bb_get(parentp->idx->dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    dps = bps / sizeof(fat_dentry_t);
-    blocks = parentp->size / bps;
+    blocks = parentp->size / BPS(bs);
     for (i = 0; i < blocks; i++) {
         rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
…
             return rc;
         }
-        for (j = 0; j < dps; j++) {
+        for (j = 0; j < DPS(bs); j++) {
             d = ((fat_dentry_t *)b->data) + j;
             switch (fat_classify_dentry(d)) {
…
                 fat_idx_t *idx = fat_idx_get_by_pos(
                     parentp->idx->dev_handle, parentp->firstc,
-                    i * dps + j);
+                    i * DPS(bs) + j);
                 fibril_mutex_unlock(&parentp->idx->lock);
                 if (!idx) {
…
     fat_bs_t *bs;
     fat_cluster_t mcl, lcl;
-    uint16_t bps;
     int rc;

     bs = block_bb_get(dev_handle);
-    bps = uint16_t_le2host(bs->bps);
     if (flags & L_DIRECTORY) {
         /* allocate a cluster */
…
         nodep->type = FAT_DIRECTORY;
         nodep->firstc = mcl;
-        nodep->size = bps * bs->spc;
+        nodep->size = BPS(bs) * SPC(bs);
     } else {
         nodep->type = FAT_FILE;
…
     block_t *b;
     unsigned i, j;
-    uint16_t bps;
-    unsigned dps;
     unsigned blocks;
     fat_cluster_t mcl, lcl;
…
     fibril_mutex_lock(&parentp->idx->lock);
     bs = block_bb_get(parentp->idx->dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    dps = bps / sizeof(fat_dentry_t);
-
-    blocks = parentp->size / bps;
+
+    blocks = parentp->size / BPS(bs);

     for (i = 0; i < blocks; i++) {
…
             return rc;
         }
-        for (j = 0; j < dps; j++) {
+        for (j = 0; j < DPS(bs); j++) {
             d = ((fat_dentry_t *)b->data) + j;
             switch (fat_classify_dentry(d)) {
…
         return rc;
     }
-    rc = fat_append_clusters(bs, parentp, mcl);
+    rc = fat_append_clusters(bs, parentp, mcl, lcl);
     if (rc != EOK) {
         (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
…
         return rc;
     }
-    parentp->size += bps * bs->spc;
+    parentp->size += BPS(bs) * SPC(bs);
     parentp->dirty = true;        /* need to sync node */
     rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
…

     childp->idx->pfc = parentp->firstc;
-    childp->idx->pdi = i * dps + j;
+    childp->idx->pdi = i * DPS(bs) + j;
     fibril_mutex_unlock(&childp->idx->lock);
…
     fat_bs_t *bs;
     fat_dentry_t *d;
-    uint16_t bps;
     block_t *b;
     bool has_children;
…
     fibril_mutex_lock(&childp->idx->lock);
     bs = block_bb_get(childp->idx->dev_handle);
-    bps = uint16_t_le2host(bs->bps);

     rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
-        (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
+        NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
         BLOCK_FLAGS_NONE);
     if (rc != EOK)
         goto error;
     d = (fat_dentry_t *)b->data +
-        (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
+        (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
     /* mark the dentry as not-currently-used */
     d->name[0] = FAT_DENTRY_ERASED;
…
     fat_bs_t *bs;
     fat_node_t *nodep = FAT_NODE(fn);
-    unsigned bps;
-    unsigned dps;
     unsigned blocks;
     block_t *b;
…
     fibril_mutex_lock(&nodep->idx->lock);
     bs = block_bb_get(nodep->idx->dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    dps = bps / sizeof(fat_dentry_t);
-
-    blocks = nodep->size / bps;
+
+    blocks = nodep->size / BPS(bs);

     for (i = 0; i < blocks; i++) {
…
             return rc;
         }
-        for (j = 0; j < dps; j++) {
+        for (j = 0; j < DPS(bs); j++) {
             d = ((fat_dentry_t *)b->data) + j;
             switch (fat_classify_dentry(d)) {
…
     enum cache_mode cmode;
     fat_bs_t *bs;
-    uint16_t bps;
-    uint16_t rde;

     /* Accept the mount options */
…
     bs = block_bb_get(dev_handle);

-    /* Read the number of root directory entries. */
-    bps = uint16_t_le2host(bs->bps);
-    rde = uint16_t_le2host(bs->root_ent_max);
-
-    if (bps != BS_SIZE) {
+    if (BPS(bs) != BS_SIZE) {
         block_fini(dev_handle);
         ipc_answer_0(rid, ENOTSUP);
…

     /* Initialize the block cache */
-    rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
+    rc = block_cache_init(dev_handle, BPS(bs), 0 /* XXX */, cmode);
     if (rc != EOK) {
         block_fini(dev_handle);
…
     rootp->refcnt = 1;
     rootp->lnkcnt = 0;    /* FS root is not linked */
-    rootp->size = rde * sizeof(fat_dentry_t);
+    rootp->size = RDE(bs) * sizeof(fat_dentry_t);
     rootp->idx = ridxp;
     ridxp->nodep = rootp;
…
     fat_node_t *nodep;
     fat_bs_t *bs;
-    uint16_t bps;
     size_t bytes;
     block_t *b;
…

     bs = block_bb_get(dev_handle);
-    bps = uint16_t_le2host(bs->bps);

     if (nodep->type == FAT_FILE) {
…
             (void) async_data_read_finalize(callid, NULL, 0);
         } else {
-            bytes = min(len, bps - pos % bps);
+            bytes = min(len, BPS(bs) - pos % BPS(bs));
             bytes = min(bytes, nodep->size - pos);
-            rc = fat_block_get(&b, bs, nodep, pos / bps,
+            rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
                 BLOCK_FLAGS_NONE);
             if (rc != EOK) {
…
                 return;
             }
-            (void) async_data_read_finalize(callid, b->data + pos % bps,
-                bytes);
+            (void) async_data_read_finalize(callid,
+                b->data + pos % BPS(bs), bytes);
             rc = block_put(b);
             if (rc != EOK) {
…

         assert(nodep->type == FAT_DIRECTORY);
-        assert(nodep->size % bps == 0);
-        assert(bps % sizeof(fat_dentry_t) == 0);
+        assert(nodep->size % BPS(bs) == 0);
+        assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

         /*
…
          * the position pointer accordingly.
          */
-        bnum = (pos * sizeof(fat_dentry_t)) / bps;
-        while (bnum < nodep->size / bps) {
+        bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
+        while (bnum < nodep->size / BPS(bs)) {
             aoff64_t o;

…
             if (rc != EOK)
                 goto err;
-            for (o = pos % (bps / sizeof(fat_dentry_t));
-                o < bps / sizeof(fat_dentry_t);
+            for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
+                o < BPS(bs) / sizeof(fat_dentry_t);
                 o++, pos++) {
                 d = ((fat_dentry_t *)b->data) + o;
…
     size_t bytes, size;
     block_t *b;
-    uint16_t bps;
-    unsigned spc;
-    unsigned bpc;        /* bytes per cluster */
     aoff64_t boundary;
     int flags = BLOCK_FLAGS_NONE;
…

     bs = block_bb_get(dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    spc = bs->spc;
-    bpc = bps * spc;

     /*
…
      * value signalizing a smaller number of bytes written.
      */
-    bytes = min(len, bps - pos % bps);
-    if (bytes == bps)
+    bytes = min(len, BPS(bs) - pos % BPS(bs));
+    if (bytes == BPS(bs))
         flags |= BLOCK_FLAGS_NOREAD;

-    boundary = ROUND_UP(nodep->size, bpc);
+    boundary = ROUND_UP(nodep->size, BPC(bs));
     if (pos < boundary) {
         /*
…
             return;
         }
-        rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
+        rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
         if (rc != EOK) {
             (void) fat_node_put(fn);
…
             return;
         }
-        (void) async_data_write_finalize(callid, b->data + pos % bps,
-            bytes);
+        (void) async_data_write_finalize(callid,
+            b->data + pos % BPS(bs), bytes);
         b->dirty = true;        /* need to sync block */
         rc = block_put(b);
…
         fat_cluster_t mcl, lcl;

-        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
+        nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
         /* create an independent chain of nclsts clusters in all FATs */
         rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
…
             return;
         }
-        rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
-            flags);
+        rc = _fat_block_get(&b, bs, dev_handle, lcl, NULL,
+            (pos / BPS(bs)) % SPC(bs), flags);
         if (rc != EOK) {
             (void) fat_free_clusters(bs, dev_handle, mcl);
…
             return;
         }
-        (void) async_data_write_finalize(callid, b->data + pos % bps,
-            bytes);
+        (void) async_data_write_finalize(callid,
+            b->data + pos % BPS(bs), bytes);
         b->dirty = true;        /* need to sync block */
         rc = block_put(b);
…
          * node's cluster chain.
          */
-        rc = fat_append_clusters(bs, nodep, mcl);
+        rc = fat_append_clusters(bs, nodep, mcl, lcl);
         if (rc != EOK) {
             (void) fat_free_clusters(bs, dev_handle, mcl);
…
     fat_node_t *nodep;
     fat_bs_t *bs;
-    uint16_t bps;
-    uint8_t spc;
-    unsigned bpc;    /* bytes per cluster */
     int rc;
…

     bs = block_bb_get(dev_handle);
-    bps = uint16_t_le2host(bs->bps);
-    spc = bs->spc;
-    bpc = bps * spc;

     if (nodep->size == size) {
…
          */
         rc = EINVAL;
-    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
+    } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
         /*
          * The node will be shrunk, but no clusters will be deallocated.
…
         fat_cluster_t lastc;
         rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
-            &lastc, NULL, (size - 1) / bpc);
+            &lastc, NULL, (size - 1) / BPC(bs));
         if (rc != EOK)
             goto out;