source: mainline/uspace/lib/block/block.c @ 9be30cdf

Last change on this file since 9be30cdf was b7adc22, checked in by Martin Decky <martin@…>, 12 years ago:
"remove cross-include (thx Jiri Zarevucky)"

/*
 * Copyright (c) 2008 Jakub Jermar
 * Copyright (c) 2008 Martin Decky
 * Copyright (c) 2011 Martin Sucha
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libblock
 * @{
 */
/**
 * @file
 * @brief Block device access and block caching library.
 */

#include <ipc/loc.h>
#include <ipc/services.h>
#include <errno.h>
#include <sys/mman.h>
#include <async.h>
#include <as.h>
#include <assert.h>
#include <bd.h>
#include <fibril_synch.h>
#include <adt/list.h>
#include <adt/hash_table.h>
#include <macros.h>
#include <mem.h>
#include <malloc.h>
#include <stdio.h>
#include <sys/typefmt.h>
#include <stacktrace.h>
#include "block.h"

/** Lock protecting the device connection list */
static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
/** Device connection list head. */
static LIST_INITIALIZE(dcl);

typedef struct {
    fibril_mutex_t lock;
    size_t lblock_size;       /**< Logical block size. */
    unsigned blocks_cluster;  /**< Physical blocks per block_t */
    unsigned block_count;     /**< Total number of blocks. */
    unsigned blocks_cached;   /**< Number of cached blocks. */
    hash_table_t block_hash;
    list_t free_list;
    enum cache_mode mode;
} cache_t;

typedef struct {
    link_t link;
    service_id_t service_id;
    async_sess_t *sess;
    bd_t *bd;
    void *bb_buf;
    aoff64_t bb_addr;
    size_t pblock_size;       /**< Physical block size. */
    cache_t *cache;
} devcon_t;

static int read_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static int write_blocks(devcon_t *, aoff64_t, size_t, void *, size_t);
static aoff64_t ba_ltop(devcon_t *, aoff64_t);

static devcon_t *devcon_search(service_id_t service_id)
{
    fibril_mutex_lock(&dcl_lock);

    list_foreach(dcl, link, devcon_t, devcon) {
        if (devcon->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            return devcon;
        }
    }

    fibril_mutex_unlock(&dcl_lock);
    return NULL;
}

static int devcon_add(service_id_t service_id, async_sess_t *sess,
    size_t bsize, bd_t *bd)
{
    devcon_t *devcon;

    devcon = malloc(sizeof(devcon_t));
    if (!devcon)
        return ENOMEM;

    link_initialize(&devcon->link);
    devcon->service_id = service_id;
    devcon->sess = sess;
    devcon->bd = bd;
    devcon->bb_buf = NULL;
    devcon->bb_addr = 0;
    devcon->pblock_size = bsize;
    devcon->cache = NULL;

    fibril_mutex_lock(&dcl_lock);
    list_foreach(dcl, link, devcon_t, d) {
        if (d->service_id == service_id) {
            fibril_mutex_unlock(&dcl_lock);
            free(devcon);
            return EEXIST;
        }
    }
    list_append(&devcon->link, &dcl);
    fibril_mutex_unlock(&dcl_lock);
    return EOK;
}

static void devcon_remove(devcon_t *devcon)
{
    fibril_mutex_lock(&dcl_lock);
    list_remove(&devcon->link);
    fibril_mutex_unlock(&dcl_lock);
}

/** Initialize a connection to a block device.
 *
 * @param mgmt Exchange management style for the session.
 * @param service_id Service ID of the block device.
 * @param comm_size Size of the communication buffer (unused in this
 * implementation).
 *
 * @return EOK on success or a negative error code.
 */
int block_init(exch_mgmt_t mgmt, service_id_t service_id,
    size_t comm_size)
{
    bd_t *bd;

    async_sess_t *sess = loc_service_connect(mgmt, service_id,
        IPC_FLAG_BLOCKING);
    if (!sess) {
        return ENOENT;
    }

    int rc = bd_open(sess, &bd);
    if (rc != EOK) {
        async_hangup(sess);
        return rc;
    }

    size_t bsize;
    rc = bd_get_block_size(bd, &bsize);
    if (rc != EOK) {
        bd_close(bd);
        async_hangup(sess);
        return rc;
    }

    rc = devcon_add(service_id, sess, bsize, bd);
    if (rc != EOK) {
        bd_close(bd);
        async_hangup(sess);
        return rc;
    }

    return EOK;
}

/** Terminate the connection to a block device and free associated resources.
 *
 * @param service_id Service ID of the block device.
 */
void block_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    if (devcon->cache)
        (void) block_cache_fini(service_id);

    devcon_remove(devcon);

    if (devcon->bb_buf)
        free(devcon->bb_buf);

    bd_close(devcon->bd);
    async_hangup(devcon->sess);

    free(devcon);
}

/** Read the boot block of a block device.
 *
 * The boot block is read into a buffer associated with the device
 * connection and can later be retrieved using block_bb_get().
 *
 * @param service_id Service ID of the block device.
 * @param ba Address of the boot block.
 *
 * @return EOK on success or a negative error code.
 */
int block_bb_read(service_id_t service_id, aoff64_t ba)
{
    void *bb_buf;
    int rc;

    devcon_t *devcon = devcon_search(service_id);
    if (!devcon)
        return ENOENT;
    if (devcon->bb_buf)
        return EEXIST;
    bb_buf = malloc(devcon->pblock_size);
    if (!bb_buf)
        return ENOMEM;

    rc = read_blocks(devcon, 0, 1, bb_buf, devcon->pblock_size);
    if (rc != EOK) {
        free(bb_buf);
        return rc;
    }

    devcon->bb_buf = bb_buf;
    devcon->bb_addr = ba;

    return EOK;
}

/** Get the boot block previously read by block_bb_read().
 *
 * @param service_id Service ID of the block device.
 *
 * @return Pointer to the boot block buffer.
 */
void *block_bb_get(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);
    return devcon->bb_buf;
}

static size_t cache_key_hash(void *key)
{
    aoff64_t *lba = (aoff64_t *) key;
    return *lba;
}

static size_t cache_hash(const ht_link_t *item)
{
    block_t *b = hash_table_get_inst(item, block_t, hash_link);
    return b->lba;
}

static bool cache_key_equal(void *key, const ht_link_t *item)
{
    aoff64_t *lba = (aoff64_t *) key;
    block_t *b = hash_table_get_inst(item, block_t, hash_link);
    return b->lba == *lba;
}

static hash_table_ops_t cache_ops = {
    .hash = cache_hash,
    .key_hash = cache_key_hash,
    .key_equal = cache_key_equal,
    .equal = NULL,
    .remove_callback = NULL
};

/** Enable the block cache for a device.
 *
 * @param service_id Service ID of the block device.
 * @param size Logical block size of the cache.
 * @param blocks Total number of blocks.
 * @param mode Cache mode (write-through or write-back).
 *
 * @return EOK on success or a negative error code.
 */
int block_cache_init(service_id_t service_id, size_t size, unsigned blocks,
    enum cache_mode mode)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    if (!devcon)
        return ENOENT;
    if (devcon->cache)
        return EEXIST;
    cache = malloc(sizeof(cache_t));
    if (!cache)
        return ENOMEM;

    fibril_mutex_initialize(&cache->lock);
    list_initialize(&cache->free_list);
    cache->lblock_size = size;
    cache->block_count = blocks;
    cache->blocks_cached = 0;
    cache->mode = mode;

    /* Allow 1:1 or small-to-large block size translation */
    if (cache->lblock_size % devcon->pblock_size != 0) {
        free(cache);
        return ENOTSUP;
    }

    cache->blocks_cluster = cache->lblock_size / devcon->pblock_size;

    if (!hash_table_create(&cache->block_hash, 0, 0, &cache_ops)) {
        free(cache);
        return ENOMEM;
    }

    devcon->cache = cache;
    return EOK;
}
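
/*
 * Usage sketch (not part of the library): a typical client first opens the
 * device and then enables caching. The service name string, the comm_size
 * value and the EXCHANGE_SERIALIZE constant below are illustrative
 * assumptions, not requirements of this file; the logical block size passed
 * to block_cache_init() must be a multiple of the device's physical block
 * size (see the check above).
 *
 *     service_id_t sid;
 *     if (loc_service_get_id("devices/disk0", &sid, 0) != EOK)
 *         return;
 *     if (block_init(EXCHANGE_SERIALIZE, sid, 2048) != EOK)
 *         return;
 *     // Cache 1 KiB logical blocks on top of the physical blocks,
 *     // using write-back mode.
 *     if (block_cache_init(sid, 1024, 0, CACHE_MODE_WB) != EOK) {
 *         block_fini(sid);
 *         return;
 *     }
 */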

/** Shut down the block cache of a device, writing back dirty blocks.
 *
 * @param service_id Service ID of the block device.
 *
 * @return EOK on success or a negative error code.
 */
int block_cache_fini(service_id_t service_id)
{
    devcon_t *devcon = devcon_search(service_id);
    cache_t *cache;
    int rc;

    if (!devcon)
        return ENOENT;
    if (!devcon->cache)
        return EOK;
    cache = devcon->cache;

    /*
     * We are expecting to find all blocks for this device handle on the
     * free list, i.e. the block reference count should be zero. Do not
     * bother with the cache and block locks because we are single-threaded.
     */
    while (!list_empty(&cache->free_list)) {
        block_t *b = list_get_instance(list_first(&cache->free_list),
            block_t, free_link);

        list_remove(&b->free_link);
        if (b->dirty) {
            rc = write_blocks(devcon, b->pba, cache->blocks_cluster,
                b->data, b->size);
            if (rc != EOK)
                return rc;
        }

        hash_table_remove_item(&cache->block_hash, &b->hash_link);

        free(b->data);
        free(b);
    }

    hash_table_destroy(&cache->block_hash);
    devcon->cache = NULL;
    free(cache);

    return EOK;
}

#define CACHE_LO_WATERMARK 10
#define CACHE_HI_WATERMARK 20

static bool cache_can_grow(cache_t *cache)
{
    if (cache->blocks_cached < CACHE_LO_WATERMARK)
        return true;
    if (!list_empty(&cache->free_list))
        return false;
    return true;
}

static void block_initialize(block_t *b)
{
    fibril_mutex_initialize(&b->lock);
    b->refcnt = 1;
    b->dirty = false;
    b->toxic = false;
    fibril_rwlock_initialize(&b->contents_lock);
    link_initialize(&b->free_link);
}

/** Instantiate a block in memory and get a reference to it.
 *
 * @param block       Pointer to where the function will store the
 *                    block pointer on success.
 * @param service_id  Service ID of the block device.
 * @param ba          Block address (logical).
 * @param flags       If BLOCK_FLAGS_NOREAD is specified, block_get()
 *                    will not read the contents of the block from the
 *                    device.
 *
 * @return EOK on success or a negative error code.
 */
int block_get(block_t **block, service_id_t service_id, aoff64_t ba, int flags)
{
    devcon_t *devcon;
    cache_t *cache;
    block_t *b;
    link_t *link;

    int rc;

    devcon = devcon_search(service_id);

    assert(devcon);
    assert(devcon->cache);

    cache = devcon->cache;

retry:
    rc = EOK;
    b = NULL;

    fibril_mutex_lock(&cache->lock);
    ht_link_t *hlink = hash_table_find(&cache->block_hash, &ba);
    if (hlink) {
found:
        /*
         * We found the block in the cache.
         */
        b = hash_table_get_inst(hlink, block_t, hash_link);
        fibril_mutex_lock(&b->lock);
        if (b->refcnt++ == 0)
            list_remove(&b->free_link);
        if (b->toxic)
            rc = EIO;
        fibril_mutex_unlock(&b->lock);
        fibril_mutex_unlock(&cache->lock);
    } else {
        /*
         * The block was not found in the cache.
         */
        if (cache_can_grow(cache)) {
            /*
             * We can grow the cache by allocating new blocks.
             * Should the allocation fail, we fail over and try to
             * recycle a block from the cache.
             */
            b = malloc(sizeof(block_t));
            if (!b)
                goto recycle;
            b->data = malloc(cache->lblock_size);
            if (!b->data) {
                free(b);
                b = NULL;
                goto recycle;
            }
            cache->blocks_cached++;
        } else {
            /*
             * Try to recycle a block from the free list.
             */
recycle:
            if (list_empty(&cache->free_list)) {
                fibril_mutex_unlock(&cache->lock);
                rc = ENOMEM;
                goto out;
            }
            link = list_first(&cache->free_list);
            b = list_get_instance(link, block_t, free_link);

            fibril_mutex_lock(&b->lock);
            if (b->dirty) {
                /*
                 * The block needs to be written back to the
                 * device before it changes identity. Do this
                 * while not holding the cache lock so that
                 * concurrency is not impeded. Also move the
                 * block to the end of the free list so that we
                 * do not slow down other instances of
                 * block_get() draining the free list.
                 */
                list_remove(&b->free_link);
                list_append(&b->free_link, &cache->free_list);
                fibril_mutex_unlock(&cache->lock);
                rc = write_blocks(devcon, b->pba,
                    cache->blocks_cluster, b->data, b->size);
                if (rc != EOK) {
                    /*
                     * We did not manage to write the block
                     * to the device. Keep it around for
                     * another try. Hopefully, we will grab
                     * another block next time.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                b->dirty = false;
                if (!fibril_mutex_trylock(&cache->lock)) {
                    /*
                     * Somebody is probably racing with us.
                     * Unlock the block and retry.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto retry;
                }
                hlink = hash_table_find(&cache->block_hash, &ba);
                if (hlink) {
                    /*
                     * Someone else must have already
                     * instantiated the block while we were
                     * not holding the cache lock.
                     * Leave the recycled block on the
                     * freelist and continue as if we
                     * found the block of interest during
                     * the first try.
                     */
                    fibril_mutex_unlock(&b->lock);
                    goto found;
                }
            }
            fibril_mutex_unlock(&b->lock);

            /*
             * Unlink the block from the free list and the hash
             * table.
             */
            list_remove(&b->free_link);
            hash_table_remove_item(&cache->block_hash, &b->hash_link);
        }

        block_initialize(b);
        b->service_id = service_id;
        b->size = cache->lblock_size;
        b->lba = ba;
        b->pba = ba_ltop(devcon, b->lba);
        hash_table_insert(&cache->block_hash, &b->hash_link);

        /*
         * Lock the block before releasing the cache lock. Thus we don't
         * kill concurrent operations on the cache while doing I/O on
         * the block.
         */
        fibril_mutex_lock(&b->lock);
        fibril_mutex_unlock(&cache->lock);

        if (!(flags & BLOCK_FLAGS_NOREAD)) {
            /*
             * The block contains old or no data. We need to read
             * the new contents from the device.
             */
            rc = read_blocks(devcon, b->pba, cache->blocks_cluster,
                b->data, cache->lblock_size);
            if (rc != EOK)
                b->toxic = true;
        } else
            rc = EOK;

        fibril_mutex_unlock(&b->lock);
    }
out:
    if ((rc != EOK) && b) {
        assert(b->toxic);
        (void) block_put(b);
        b = NULL;
    }
    *block = b;
    return rc;
}

/** Release a reference to a block.
 *
 * If the last reference is dropped, the block is put on the free list.
 *
 * @param block Block of which a reference is to be released.
 *
 * @return EOK on success or a negative error code.
 */
int block_put(block_t *block)
{
    devcon_t *devcon = devcon_search(block->service_id);
    cache_t *cache;
    unsigned blocks_cached;
    enum cache_mode mode;
    int rc = EOK;

    assert(devcon);
    assert(devcon->cache);
    assert(block->refcnt >= 1);

    cache = devcon->cache;

retry:
    fibril_mutex_lock(&cache->lock);
    blocks_cached = cache->blocks_cached;
    mode = cache->mode;
    fibril_mutex_unlock(&cache->lock);

    /*
     * Determine whether to sync the block. Syncing the block is best done
     * when not holding the cache lock as it does not impede concurrency.
     * Since the situation may have changed when we unlocked the cache, the
     * blocks_cached and mode variables are mere hints. We will recheck the
     * conditions later when the cache lock is held again.
     */
    fibril_mutex_lock(&block->lock);
    if (block->toxic)
        block->dirty = false;  /* will not write back toxic block */
    if (block->dirty && (block->refcnt == 1) &&
        (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
        rc = write_blocks(devcon, block->pba, cache->blocks_cluster,
            block->data, block->size);
        block->dirty = false;
    }
    fibril_mutex_unlock(&block->lock);

    fibril_mutex_lock(&cache->lock);
    fibril_mutex_lock(&block->lock);
    if (!--block->refcnt) {
        /*
         * Last reference to the block was dropped. Either free the
         * block or put it on the free list. In case of an I/O error,
         * free the block.
         */
        if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
            (rc != EOK)) {
            /*
             * Currently there are too many cached blocks or there
             * was an I/O error when writing the block back to the
             * device.
             */
            if (block->dirty) {
                /*
                 * We cannot sync the block while holding the
                 * cache lock. Release everything and retry.
                 */
                block->refcnt++;
                fibril_mutex_unlock(&block->lock);
                fibril_mutex_unlock(&cache->lock);
                goto retry;
            }
            /*
             * Take the block out of the cache and free it.
             */
            hash_table_remove_item(&cache->block_hash, &block->hash_link);
            fibril_mutex_unlock(&block->lock);
            free(block->data);
            free(block);
            cache->blocks_cached--;
            fibril_mutex_unlock(&cache->lock);
            return rc;
        }
        /*
         * Put the block on the free list.
         */
        if (cache->mode != CACHE_MODE_WB && block->dirty) {
            /*
             * We cannot sync the block while holding the cache
             * lock. Release everything and retry.
             */
            block->refcnt++;
            fibril_mutex_unlock(&block->lock);
            fibril_mutex_unlock(&cache->lock);
            goto retry;
        }
        list_append(&block->free_link, &cache->free_list);
    }
    fibril_mutex_unlock(&block->lock);
    fibril_mutex_unlock(&cache->lock);

    return rc;
}
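
/*
 * Usage sketch (not part of the library): reading and updating one logical
 * block through the cache. The service ID 'sid' is assumed to have been set
 * up with block_init() and block_cache_init(); BLOCK_FLAGS_NONE is assumed
 * to be the "read the contents" counterpart of BLOCK_FLAGS_NOREAD declared
 * in block.h.
 *
 *     block_t *b;
 *     int rc = block_get(&b, sid, 124, BLOCK_FLAGS_NONE);
 *     if (rc != EOK)
 *         return rc;
 *     memset(b->data, 0, b->size);  // modify the cached contents
 *     b->dirty = true;              // schedule a write-back
 *     rc = block_put(b);            // drop the reference
 */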

/** Read sequential data from a block device.
 *
 * @param service_id Service ID of the block device.
 * @param buf Communication buffer holding one device block.
 * @param bufpos Pointer to the first unread valid offset within the
 * communication buffer.
 * @param buflen Pointer to the number of unread bytes that are ready in
 * the communication buffer.
 * @param pos Device position to be read.
 * @param dst Destination buffer.
 * @param size Size of the destination buffer.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_seqread(service_id_t service_id, void *buf, size_t *bufpos,
    size_t *buflen, aoff64_t *pos, void *dst, size_t size)
{
    size_t offset = 0;
    size_t left = size;
    size_t block_size;
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);
    block_size = devcon->pblock_size;

    while (left > 0) {
        size_t rd;

        if (*bufpos + left < *buflen)
            rd = left;
        else
            rd = *buflen - *bufpos;

        if (rd > 0) {
            /*
             * Copy the contents of the communication buffer to the
             * destination buffer.
             */
            memcpy(dst + offset, buf + *bufpos, rd);
            offset += rd;
            *bufpos += rd;
            *pos += rd;
            left -= rd;
        }

        if (*bufpos == *buflen) {
            /* Refill the communication buffer with a new block. */
            int rc;

            rc = read_blocks(devcon, *pos / block_size, 1, buf,
                devcon->pblock_size);
            if (rc != EOK) {
                return rc;
            }

            *bufpos = 0;
            *buflen = block_size;
        }
    }

    return EOK;
}
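
/*
 * Usage sketch (not part of the library): the caller owns the communication
 * buffer and the bufpos/buflen/pos triple and passes them unchanged between
 * consecutive calls. The buffer must be one physical block large, as
 * returned by block_get_bsize(); 'sid' is an assumed, already initialized
 * service ID.
 *
 *     size_t bsize;
 *     if (block_get_bsize(sid, &bsize) != EOK)
 *         return;
 *     void *buf = malloc(bsize);
 *     if (buf == NULL)
 *         return;
 *     size_t bufpos = 0;
 *     size_t buflen = 0;
 *     aoff64_t pos = 0;
 *     uint32_t first, second;
 *     // Consecutive calls continue where the previous one stopped.
 *     (void) block_seqread(sid, buf, &bufpos, &buflen, &pos, &first, sizeof(first));
 *     (void) block_seqread(sid, buf, &bufpos, &buflen, &pos, &second, sizeof(second));
 *     free(buf);
 */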

/** Read blocks directly from device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param ba Address of first block (physical).
 * @param cnt Number of blocks.
 * @param buf Buffer for storing the data.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_read_direct(service_id_t service_id, aoff64_t ba, size_t cnt, void *buf)
{
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);

    return read_blocks(devcon, ba, cnt, buf, devcon->pblock_size * cnt);
}

/** Write blocks directly to device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param ba Address of first block (physical).
 * @param cnt Number of blocks.
 * @param data The data to be written.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_write_direct(service_id_t service_id, aoff64_t ba, size_t cnt,
    const void *data)
{
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);

    return write_blocks(devcon, ba, cnt, (void *) data, devcon->pblock_size * cnt);
}

/** Get device block size.
 *
 * @param service_id Service ID of the block device.
 * @param bsize Output block size.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_get_bsize(service_id_t service_id, size_t *bsize)
{
    devcon_t *devcon;

    devcon = devcon_search(service_id);
    assert(devcon);

    return bd_get_block_size(devcon->bd, bsize);
}

/** Get number of blocks on device.
 *
 * @param service_id Service ID of the block device.
 * @param nblocks Output number of blocks.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_get_nblocks(service_id_t service_id, aoff64_t *nblocks)
{
    devcon_t *devcon = devcon_search(service_id);
    assert(devcon);

    return bd_get_num_blocks(devcon->bd, nblocks);
}
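
/*
 * Usage sketch (not part of the library): the two getters above can be
 * combined to compute the device capacity in bytes. 'sid' is an assumed,
 * already initialized service ID.
 *
 *     size_t bsize;
 *     aoff64_t nblocks;
 *     if (block_get_bsize(sid, &bsize) == EOK &&
 *         block_get_nblocks(sid, &nblocks) == EOK) {
 *         uint64_t capacity = (uint64_t) nblocks * bsize;
 *         printf("Device capacity: %" PRIu64 " bytes\n", capacity);
 *     }
 */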

/** Read bytes directly from the device (bypass cache).
 *
 * @param service_id Service ID of the block device.
 * @param abs_offset Absolute offset in bytes where to start reading.
 * @param bytes Number of bytes to read.
 * @param data Buffer that receives the data.
 *
 * @return EOK on success or a negative error code on failure.
 */
int block_read_bytes_direct(service_id_t service_id, aoff64_t abs_offset,
    size_t bytes, void *data)
{
    int rc;
    size_t phys_block_size;
    size_t buf_size;
    void *buffer;
    aoff64_t first_block;
    aoff64_t last_block;
    size_t blocks;
    size_t offset;

    rc = block_get_bsize(service_id, &phys_block_size);
    if (rc != EOK) {
        return rc;
    }

    /* Calculate data position and required space. */
    first_block = abs_offset / phys_block_size;
    offset = abs_offset % phys_block_size;
    last_block = (abs_offset + bytes - 1) / phys_block_size;
    blocks = last_block - first_block + 1;
    buf_size = blocks * phys_block_size;

    /* Read the data into memory. */
    buffer = malloc(buf_size);
    if (buffer == NULL) {
        return ENOMEM;
    }

    rc = block_read_direct(service_id, first_block, blocks, buffer);
    if (rc != EOK) {
        free(buffer);
        return rc;
    }

    /* Copy the data from the buffer. */
    memcpy(data, buffer + offset, bytes);
    free(buffer);

    return EOK;
}

/** Get TOC from device.
 *
 * @param service_id Service ID of the block device.
 * @param session Starting session.
 *
 * @return Allocated TOC structure.
 * @return NULL on failure.
 */
toc_block_t *block_get_toc(service_id_t service_id, uint8_t session)
{
    devcon_t *devcon = devcon_search(service_id);
    toc_block_t *toc = NULL;
    int rc;

    assert(devcon);

    toc = (toc_block_t *) malloc(sizeof(toc_block_t));
    if (toc == NULL)
        return NULL;

    rc = bd_read_toc(devcon->bd, session, toc, sizeof(toc_block_t));
    if (rc != EOK) {
        free(toc);
        return NULL;
    }

    return toc;
}

/** Read blocks from block device.
 *
 * @param devcon Device connection.
 * @param ba Address of first block.
 * @param cnt Number of blocks.
 * @param buf Buffer for storing the data.
 * @param size Size of the buffer in bytes.
 *
 * @return EOK on success or a negative error code on failure.
 */
static int read_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *buf,
    size_t size)
{
    assert(devcon);

    int rc = bd_read_blocks(devcon->bd, ba, cnt, buf, size);
    if (rc != EOK) {
        printf("Error %d reading %zu blocks starting at block %" PRIuOFF64
            " from device handle %" PRIun "\n", rc, cnt, ba,
            devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Write blocks to block device.
 *
 * @param devcon Device connection.
 * @param ba Address of first block.
 * @param cnt Number of blocks.
 * @param data Buffer containing the data to write.
 * @param size Size of the buffer in bytes.
 *
 * @return EOK on success or a negative error code on failure.
 */
static int write_blocks(devcon_t *devcon, aoff64_t ba, size_t cnt, void *data,
    size_t size)
{
    assert(devcon);

    int rc = bd_write_blocks(devcon->bd, ba, cnt, data, size);
    if (rc != EOK) {
        printf("Error %d writing %zu blocks starting at block %" PRIuOFF64
            " to device handle %" PRIun "\n", rc, cnt, ba, devcon->service_id);
#ifndef NDEBUG
        stacktrace_print();
#endif
    }

    return rc;
}

/** Convert logical block address to physical block address. */
static aoff64_t ba_ltop(devcon_t *devcon, aoff64_t lba)
{
    assert(devcon->cache != NULL);
    return lba * devcon->cache->blocks_cluster;
}

/** @}
 */