source: mainline/uspace/drv/block/virtio-blk/virtio-blk.c@ 56210a7a

/*
 * Copyright (c) 2019 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "virtio-blk.h"

#include <stdio.h>
#include <stdint.h>

#include <as.h>
#include <ddf/driver.h>
#include <ddf/interrupt.h>
#include <ddf/log.h>
#include <pci_dev_iface.h>
#include <fibril_synch.h>

#include <bd_srv.h>

#include <virtio-pci.h>

#define NAME "virtio-blk"

#define VIRTIO_BLK_NUM_QUEUES 1

#define RQ_QUEUE 0

/*
 * VIRTIO_BLK requests need at least two descriptors so that device-read-only
 * buffers are separated from device-writable buffers. For convenience, we
 * always use three descriptors per request: one for the request header, one
 * for the data buffer and one for the request footer. We therefore organize
 * the virtqueue so that the first RQ_BUFFERS descriptors are used for request
 * headers, the following RQ_BUFFERS descriptors are used for in/out buffers
 * and the last RQ_BUFFERS descriptors are used for request footers.
 */
#define REQ_HEADER_DESC(descno) (0 * RQ_BUFFERS + (descno))
#define REQ_BUFFER_DESC(descno) (1 * RQ_BUFFERS + (descno))
#define REQ_FOOTER_DESC(descno) (2 * RQ_BUFFERS + (descno))
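/*
 * For example, request slot 5 uses descriptor REQ_HEADER_DESC(5) == 5 for
 * its header, REQ_BUFFER_DESC(5) == RQ_BUFFERS + 5 for its data buffer and
 * REQ_FOOTER_DESC(5) == 2 * RQ_BUFFERS + 5 for its footer.
 */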

static errno_t virtio_blk_dev_add(ddf_dev_t *dev);

static driver_ops_t virtio_blk_driver_ops = {
	.dev_add = virtio_blk_dev_add
};

static driver_t virtio_blk_driver = {
	.name = NAME,
	.driver_ops = &virtio_blk_driver_ops
};

/** VirtIO block IRQ handler.
 *
 * @param icall IRQ event notification
 * @param arg Argument (virtio_blk_t *)
 */
static void virtio_blk_irq_handler(ipc_call_t *icall, void *arg)
{
	virtio_blk_t *virtio_blk = (virtio_blk_t *)arg;
	virtio_dev_t *vdev = &virtio_blk->virtio_dev;

	uint16_t descno;
	uint32_t len;

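	/*
	 * The used ring hands back the head of each completed descriptor
	 * chain. Chains are always headed by a request header descriptor,
	 * so descno directly identifies the completed request slot.
	 */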
	while (virtio_virtq_consume_used(vdev, RQ_QUEUE, &descno, &len)) {
		assert(descno < RQ_BUFFERS);
		fibril_mutex_lock(&virtio_blk->completion_lock[descno]);
		fibril_condvar_signal(&virtio_blk->completion_cv[descno]);
		fibril_mutex_unlock(&virtio_blk->completion_lock[descno]);
	}
}

static errno_t virtio_blk_register_interrupt(ddf_dev_t *dev)
{
	virtio_blk_t *virtio_blk = (virtio_blk_t *) ddf_dev_data_get(dev);
	virtio_dev_t *vdev = &virtio_blk->virtio_dev;

	async_sess_t *parent_sess = ddf_dev_parent_sess_get(dev);
	if (parent_sess == NULL)
		return ENOMEM;

	hw_res_list_parsed_t res;
	hw_res_list_parsed_init(&res);

	errno_t rc = hw_res_get_list_parsed(parent_sess, &res, 0);
	if (rc != EOK)
		return rc;

	if (res.irqs.count < 1) {
		hw_res_list_parsed_clean(&res);
		return EINVAL;
	}

	virtio_blk->irq = res.irqs.irqs[0];
	hw_res_list_parsed_clean(&res);

	irq_pio_range_t pio_ranges[] = {
		{
			.base = vdev->isr_phys,
			.size = sizeof(vdev->isr_phys),
		}
	};

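	/*
	 * Read the 8-bit ISR status register; per the VirtIO spec, reading
	 * it also acknowledges the interrupt on the device. If the value
	 * read is zero, the interrupt did not originate from this device
	 * and the CMD_ACCEPT below is skipped.
	 */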
	irq_cmd_t irq_commands[] = {
		{
			.cmd = CMD_PIO_READ_8,
			.addr = (void *) vdev->isr_phys,
			.dstarg = 2
		},
		{
			.cmd = CMD_PREDICATE,
			.value = 1,
			.srcarg = 2
		},
		{
			.cmd = CMD_ACCEPT
		}
	};

	irq_code_t irq_code = {
		.rangecount = sizeof(pio_ranges) / sizeof(irq_pio_range_t),
		.ranges = pio_ranges,
		.cmdcount = sizeof(irq_commands) / sizeof(irq_cmd_t),
		.cmds = irq_commands
	};

	return register_interrupt_handler(dev, virtio_blk->irq,
	    virtio_blk_irq_handler, (void *)virtio_blk, &irq_code,
	    &virtio_blk->irq_handle);
}

static errno_t virtio_blk_bd_open(bd_srvs_t *bds, bd_srv_t *bd)
{
	return EOK;
}

static errno_t virtio_blk_bd_close(bd_srv_t *bd)
{
	return EOK;
}

static errno_t virtio_blk_rw_block(virtio_blk_t *virtio_blk, bool read,
    aoff64_t ba, void *buf)
{
	virtio_dev_t *vdev = &virtio_blk->virtio_dev;

	/*
	 * Allocate a descriptor.
	 *
	 * The allocated descno determines the header descriptor
	 * (REQ_HEADER_DESC), the buffer descriptor (REQ_BUFFER_DESC) and
	 * the footer descriptor (REQ_FOOTER_DESC) of the request.
	 */
	fibril_mutex_lock(&virtio_blk->free_lock);
	uint16_t descno = virtio_alloc_desc(vdev, RQ_QUEUE,
	    &virtio_blk->rq_free_head);
	while (descno == (uint16_t) -1U) {
		fibril_condvar_wait(&virtio_blk->free_cv,
		    &virtio_blk->free_lock);
		descno = virtio_alloc_desc(vdev, RQ_QUEUE,
		    &virtio_blk->rq_free_head);
	}
	fibril_mutex_unlock(&virtio_blk->free_lock);

	assert(descno < RQ_BUFFERS);

	/* Setup the request header */
	virtio_blk_req_header_t *req_header =
	    (virtio_blk_req_header_t *) virtio_blk->rq_header[descno];
	memset(req_header, 0, sizeof(virtio_blk_req_header_t));
	pio_write_le32(&req_header->type,
	    read ? VIRTIO_BLK_T_IN : VIRTIO_BLK_T_OUT);
	pio_write_le64(&req_header->sector, ba);

	/* Copy write data to the request. */
	if (!read)
		memcpy(virtio_blk->rq_buf[descno], buf, VIRTIO_BLK_BLOCK_SIZE);

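	/*
	 * Hold the completion lock before making the chain available to the
	 * device so that the IRQ handler's condvar signal cannot be missed.
	 */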
	fibril_mutex_lock(&virtio_blk->completion_lock[descno]);

	/*
	 * Set the descriptors, chain them in the virtqueue and notify the
	 * device.
	 */
	virtio_virtq_desc_set(vdev, RQ_QUEUE, REQ_HEADER_DESC(descno),
	    virtio_blk->rq_header_p[descno], sizeof(virtio_blk_req_header_t),
	    VIRTQ_DESC_F_NEXT, REQ_BUFFER_DESC(descno));
	virtio_virtq_desc_set(vdev, RQ_QUEUE, REQ_BUFFER_DESC(descno),
	    virtio_blk->rq_buf_p[descno], VIRTIO_BLK_BLOCK_SIZE,
	    VIRTQ_DESC_F_NEXT | (read ? VIRTQ_DESC_F_WRITE : 0),
	    REQ_FOOTER_DESC(descno));
	virtio_virtq_desc_set(vdev, RQ_QUEUE, REQ_FOOTER_DESC(descno),
	    virtio_blk->rq_footer_p[descno], sizeof(virtio_blk_req_footer_t),
	    VIRTQ_DESC_F_WRITE, 0);
	virtio_virtq_produce_available(vdev, RQ_QUEUE, descno);

	/*
	 * Wait for the completion of the request.
	 */
	fibril_condvar_wait(&virtio_blk->completion_cv[descno],
	    &virtio_blk->completion_lock[descno]);
	fibril_mutex_unlock(&virtio_blk->completion_lock[descno]);

	errno_t rc;
	virtio_blk_req_footer_t *footer =
	    (virtio_blk_req_footer_t *) virtio_blk->rq_footer[descno];
	switch (footer->status) {
	case VIRTIO_BLK_S_OK:
		rc = EOK;
		break;
	case VIRTIO_BLK_S_IOERR:
		rc = EIO;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		rc = ENOTSUP;
		break;
	default:
		ddf_msg(LVL_DEBUG, "device returned unknown status=%d\n",
		    (int) footer->status);
		rc = EIO;
		break;
	}

	/* Copy read data from the request */
	if (rc == EOK && read)
		memcpy(buf, virtio_blk->rq_buf[descno], VIRTIO_BLK_BLOCK_SIZE);

	/* Free the descriptor and buffer */
	fibril_mutex_lock(&virtio_blk->free_lock);
	virtio_free_desc(vdev, RQ_QUEUE, &virtio_blk->rq_free_head, descno);
	fibril_condvar_signal(&virtio_blk->free_cv);
	fibril_mutex_unlock(&virtio_blk->free_lock);

	return rc;
}

static errno_t virtio_blk_bd_rw_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size, bool read)
{
	virtio_blk_t *virtio_blk = (virtio_blk_t *) bd->srvs->sarg;
	aoff64_t i;
	errno_t rc;

	if (size != cnt * VIRTIO_BLK_BLOCK_SIZE)
		return EINVAL;

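	/*
	 * Transfer the blocks one at a time, each as a separate
	 * three-descriptor request that completes before the next one
	 * is submitted.
	 */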
	for (i = 0; i < cnt; i++) {
		rc = virtio_blk_rw_block(virtio_blk, read, ba + i,
		    buf + i * VIRTIO_BLK_BLOCK_SIZE);
		if (rc != EOK)
			return rc;
	}

	return EOK;
}

static errno_t virtio_blk_bd_read_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    void *buf, size_t size)
{
	return virtio_blk_bd_rw_blocks(bd, ba, cnt, buf, size, true);
}

static errno_t virtio_blk_bd_write_blocks(bd_srv_t *bd, aoff64_t ba, size_t cnt,
    const void *buf, size_t size)
{
	return virtio_blk_bd_rw_blocks(bd, ba, cnt, (void *) buf, size, false);
}

static errno_t virtio_blk_bd_get_block_size(bd_srv_t *bd, size_t *size)
{
	*size = VIRTIO_BLK_BLOCK_SIZE;
	return EOK;
}

static errno_t virtio_blk_bd_get_num_blocks(bd_srv_t *bd, aoff64_t *nb)
{
	virtio_blk_t *virtio_blk = (virtio_blk_t *) bd->srvs->sarg;
	virtio_blk_cfg_t *blkcfg = virtio_blk->virtio_dev.device_cfg;
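	/*
	 * The capacity field of the device configuration counts 512-byte
	 * sectors, as required by the VirtIO specification.
	 */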
	*nb = pio_read_le64(&blkcfg->capacity);
	return EOK;
}

bd_ops_t virtio_blk_bd_ops = {
	.open = virtio_blk_bd_open,
	.close = virtio_blk_bd_close,
	.read_blocks = virtio_blk_bd_read_blocks,
	.write_blocks = virtio_blk_bd_write_blocks,
	.get_block_size = virtio_blk_bd_get_block_size,
	.get_num_blocks = virtio_blk_bd_get_num_blocks,
};

static errno_t virtio_blk_initialize(ddf_dev_t *dev)
{
	virtio_blk_t *virtio_blk = ddf_dev_data_alloc(dev,
	    sizeof(virtio_blk_t));
	if (!virtio_blk)
		return ENOMEM;

	fibril_mutex_initialize(&virtio_blk->free_lock);
	fibril_condvar_initialize(&virtio_blk->free_cv);

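	/*
	 * Each request slot has its own completion lock and condition
	 * variable on which the submitting fibril blocks until the IRQ
	 * handler signals that the request has completed.
	 */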
	for (unsigned i = 0; i < RQ_BUFFERS; i++) {
		fibril_mutex_initialize(&virtio_blk->completion_lock[i]);
		fibril_condvar_initialize(&virtio_blk->completion_cv[i]);
	}

	bd_srvs_init(&virtio_blk->bds);
	virtio_blk->bds.ops = &virtio_blk_bd_ops;
	virtio_blk->bds.sarg = virtio_blk;

	errno_t rc = virtio_pci_dev_initialize(dev, &virtio_blk->virtio_dev);
	if (rc != EOK)
		return rc;

	virtio_dev_t *vdev = &virtio_blk->virtio_dev;
	virtio_pci_common_cfg_t *cfg = virtio_blk->virtio_dev.common_cfg;

	/*
	 * Register IRQ
	 */
	rc = virtio_blk_register_interrupt(dev);
	if (rc != EOK)
		goto fail;

	/* Reset the device and negotiate the feature bits */
	rc = virtio_device_setup_start(vdev, 0);
	if (rc != EOK)
		goto fail;

	/* Perform device-specific setup */

	/*
	 * Discover and configure the virtqueue
	 */
	uint16_t num_queues = pio_read_le16(&cfg->num_queues);
	if (num_queues != VIRTIO_BLK_NUM_QUEUES) {
		ddf_msg(LVL_NOTE, "Unsupported number of virtqueues: %u",
		    num_queues);
		rc = ELIMIT;
		goto fail;
	}

	vdev->queues = calloc(num_queues, sizeof(virtq_t));
	if (!vdev->queues) {
		rc = ENOMEM;
		goto fail;
	}

	/* For each in/out request we need 3 descriptors */
	rc = virtio_virtq_setup(vdev, RQ_QUEUE, 3 * RQ_BUFFERS);
	if (rc != EOK)
		goto fail;

	/*
	 * Setup DMA buffers
	 */
	rc = virtio_setup_dma_bufs(RQ_BUFFERS, sizeof(virtio_blk_req_header_t),
	    true, virtio_blk->rq_header, virtio_blk->rq_header_p);
	if (rc != EOK)
		goto fail;
	rc = virtio_setup_dma_bufs(RQ_BUFFERS, VIRTIO_BLK_BLOCK_SIZE,
	    true, virtio_blk->rq_buf, virtio_blk->rq_buf_p);
	if (rc != EOK)
		goto fail;
	rc = virtio_setup_dma_bufs(RQ_BUFFERS, sizeof(virtio_blk_req_footer_t),
	    false, virtio_blk->rq_footer, virtio_blk->rq_footer_p);
	if (rc != EOK)
		goto fail;

	/*
	 * Put all request descriptors on a free list. Because of the fixed
	 * correspondence between the header, buffer and footer descriptors,
	 * we only need to manage allocations for one set: the request header
	 * descriptors.
	 */
	virtio_create_desc_free_list(vdev, RQ_QUEUE, RQ_BUFFERS,
	    &virtio_blk->rq_free_head);

	/*
	 * Enable IRQ
	 */
	rc = hw_res_enable_interrupt(ddf_dev_parent_sess_get(dev),
	    virtio_blk->irq);
	if (rc != EOK) {
		ddf_msg(LVL_NOTE, "Failed to enable interrupt");
		goto fail;
	}

	ddf_msg(LVL_NOTE, "Registered IRQ %d", virtio_blk->irq);

	/* Go live */
	virtio_device_setup_finalize(vdev);

	return EOK;

fail:
	virtio_teardown_dma_bufs(virtio_blk->rq_header);
	virtio_teardown_dma_bufs(virtio_blk->rq_buf);
	virtio_teardown_dma_bufs(virtio_blk->rq_footer);

	virtio_device_setup_fail(vdev);
	virtio_pci_dev_cleanup(vdev);
	return rc;
}

static void virtio_blk_uninitialize(ddf_dev_t *dev)
{
	virtio_blk_t *virtio_blk = (virtio_blk_t *) ddf_dev_data_get(dev);

	virtio_teardown_dma_bufs(virtio_blk->rq_header);
	virtio_teardown_dma_bufs(virtio_blk->rq_buf);
	virtio_teardown_dma_bufs(virtio_blk->rq_footer);

	virtio_device_setup_fail(&virtio_blk->virtio_dev);
	virtio_pci_dev_cleanup(&virtio_blk->virtio_dev);
}

static void virtio_blk_bd_connection(ipc_call_t *icall, void *arg)
{
	virtio_blk_t *virtio_blk;
	ddf_fun_t *fun = (ddf_fun_t *) arg;

	virtio_blk = (virtio_blk_t *) ddf_dev_data_get(ddf_fun_get_dev(fun));
	bd_conn(icall, &virtio_blk->bds);
}

static errno_t virtio_blk_dev_add(ddf_dev_t *dev)
{
	ddf_msg(LVL_NOTE, "%s %s (handle = %zu)", __func__,
	    ddf_dev_get_name(dev), ddf_dev_get_handle(dev));

	errno_t rc = virtio_blk_initialize(dev);
	if (rc != EOK)
		return rc;

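	/*
	 * Expose the block device as the exposed function "port0" and
	 * register it in the "disk" category so that clients can discover
	 * it through the location service.
	 */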
	ddf_fun_t *fun = ddf_fun_create(dev, fun_exposed, "port0");
	if (fun == NULL) {
		rc = ENOMEM;
		goto uninitialize;
	}

	ddf_fun_set_conn_handler(fun, virtio_blk_bd_connection);

	rc = ddf_fun_bind(fun);
	if (rc != EOK) {
		ddf_msg(LVL_ERROR, "Failed binding device function");
		goto destroy;
	}

	rc = ddf_fun_add_to_category(fun, "disk");
	if (rc != EOK) {
		ddf_msg(LVL_ERROR, "Failed adding function to category");
		goto unbind;
	}

	ddf_msg(LVL_NOTE, "The %s device has been successfully initialized.",
	    ddf_dev_get_name(dev));

	return EOK;

unbind:
	ddf_fun_unbind(fun);
destroy:
	ddf_fun_destroy(fun);
uninitialize:
	virtio_blk_uninitialize(dev);
	return rc;
}

int main(void)
{
	printf("%s: HelenOS virtio-blk driver\n", NAME);

	(void) ddf_log_init(NAME);
	return ddf_driver_main(&virtio_blk_driver);
}