source: mainline/uspace/drv/bus/usb/xhci/trb_ring.c@ 25251bb

Last change on this file since 25251bb was 47ab89e, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

trb ring: fix freeing segments

/*
 * Copyright (c) 2017 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <assert.h>
#include <ddi.h>
#include <as.h>
#include <align.h>
#include <usb/debug.h>
#include <usb/host/utils/malloc32.h>
#include "hw_struct/trb.h"
#include "trb_ring.h"

#define SEGMENT_HEADER_SIZE (sizeof(link_t) + sizeof(uintptr_t))

/**
 * Number of TRBs in a segment (with our header).
 */
#define SEGMENT_TRB_COUNT ((PAGE_SIZE - SEGMENT_HEADER_SIZE) / sizeof(xhci_trb_t))
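
/*
 * For illustration: assuming 4 KiB pages, 16-byte TRBs and 64-bit pointers,
 * SEGMENT_HEADER_SIZE is 24 bytes and SEGMENT_TRB_COUNT works out to
 * (4096 - 24) / 16 = 254 TRBs per segment.
 */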

struct trb_segment {
	xhci_trb_t trb_storage [SEGMENT_TRB_COUNT];

	link_t segments_link;
	uintptr_t phys;
} __attribute__((aligned(PAGE_SIZE)));


static inline xhci_trb_t *segment_begin(trb_segment_t *segment)
{
	return segment->trb_storage;
}

static inline xhci_trb_t *segment_end(trb_segment_t *segment)
{
	return segment_begin(segment) + SEGMENT_TRB_COUNT;
}

/**
 * Allocate and initialize new segment.
 *
 * TODO: When the HC supports 64-bit addressing, there's no need to restrict
 * to DMAMEM_4GiB.
 */
static int trb_segment_allocate(trb_segment_t **segment)
{
	uintptr_t phys;
	int err;

	*segment = AS_AREA_ANY;
	err = dmamem_map_anonymous(PAGE_SIZE,
	    DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0, &phys,
	    (void *) segment);

	if (err == EOK) {
		memset(*segment, 0, PAGE_SIZE);
		(*segment)->phys = phys;

		usb_log_debug2("Allocated new ring segment.");
	}

	return err;
}
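
/*
 * Note that each segment has both a virtual mapping (used by the driver) and
 * a physical address recorded in ->phys; the physical one is what gets written
 * into Link TRBs, dequeue pointers and ERST entries handed to the HC.
 */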

/**
 * Initializes the ring with one segment.
 * Even when it fails, the structure needs to be finalized.
 */
int xhci_trb_ring_init(xhci_trb_ring_t *ring)
{
	struct trb_segment *segment;
	int err;

	list_initialize(&ring->segments);

	if ((err = trb_segment_allocate(&segment)) != EOK)
		return err;

	list_append(&segment->segments_link, &ring->segments);
	ring->segment_count = 1;

	xhci_trb_t *last = segment_end(segment) - 1;
	xhci_trb_link_fill(last, segment->phys);
	xhci_trb_set_cycle(last, true);

	ring->enqueue_segment = segment;
	ring->enqueue_trb = segment_begin(segment);
	ring->dequeue = segment->phys;
	ring->pcs = 1;

	fibril_mutex_initialize(&ring->guard);

	usb_log_debug2("Initialized new TRB ring.");

	return EOK;
}
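
/*
 * Usage sketch (illustrative only, not part of this file): per the comment
 * above, the ring is finalized even when initialization fails; with an empty
 * segment list, xhci_trb_ring_fini() is effectively a no-op.
 *
 *	xhci_trb_ring_t ring;
 *	if (xhci_trb_ring_init(&ring) != EOK) {
 *		xhci_trb_ring_fini(&ring);
 *		// propagate the error
 *	}
 */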

int xhci_trb_ring_fini(xhci_trb_ring_t *ring)
{
	assert(ring);

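	/* Unmap every segment that trb_segment_allocate() mapped for this ring. */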
	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
		dmamem_unmap_anonymous(segment);
	}

	return EOK;
}

/**
 * When the enqueue pointer targets a Link TRB, resolve it.
 *
 * Relies on segments being in the segment list in linked order.
 *
 * According to section 4.9.2.2, figure 16, Link TRBs cannot be chained, so
 * this neither needs to be called in a loop nor needs an inner loop.
 */
static void trb_ring_resolve_link(xhci_trb_ring_t *ring)
{
	link_t *next_segment = list_next(&ring->enqueue_segment->segments_link, &ring->segments);
	if (!next_segment)
		next_segment = list_first(&ring->segments);

	ring->enqueue_segment = list_get_instance(next_segment, trb_segment_t, segments_link);
	ring->enqueue_trb = segment_begin(ring->enqueue_segment);
}

static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring)
{
	uintptr_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment);
	return ring->enqueue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

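/**
 * Decide whether a TRB will generate a completion event: command TRBs (type
 * Enable Slot Command and above) always do, transfer TRBs only when their IOC
 * flag is set.
 */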
static bool trb_generates_interrupt(xhci_trb_t *trb)
{
	return TRB_TYPE(*trb) >= XHCI_TRB_TYPE_ENABLE_SLOT_CMD
	    || TRB_IOC(*trb);
}

/**
 * Enqueue TDs composed of TRBs.
 *
 * This will copy the specified number of TRBs, chained together, into the
 * ring. The cycle flag of the TRBs may be changed in the process.
 *
 * The copied TRBs must be contiguous in memory, and must not contain Link TRBs.
 *
 * We cannot avoid the copying, because the TRBs in the ring have to be updated
 * atomically.
 *
 * @param first_trb the first TRB
 * @param trbs number of TRBs to enqueue
 * @param phys returns the physical address of the interrupt-generating TRB
 *             (typically the last TRB of the TD)
 * @return EOK on success,
 *         EAGAIN when the ring is too full to fit all TRBs (temporary),
 *         ENOTSUP when more than one TRB of the TD would generate an interrupt
 */
int xhci_trb_ring_enqueue_multiple(xhci_trb_ring_t *ring, xhci_trb_t *first_trb,
    size_t trbs, uintptr_t *phys)
{
	assert(trbs > 0);
	fibril_mutex_lock(&ring->guard);

	xhci_trb_t * const saved_enqueue_trb = ring->enqueue_trb;
	trb_segment_t * const saved_enqueue_segment = ring->enqueue_segment;
	if (phys)
		*phys = (uintptr_t)NULL;

	/*
	 * First, dry run and advance the enqueue pointer to see if the ring would
	 * be full anytime during the transaction.
	 */
	xhci_trb_t *trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		if (phys && trb_generates_interrupt(trb)) {
			/* Only one interrupt-generating TRB per TD is supported. */
			if (*phys) {
				ring->enqueue_segment = saved_enqueue_segment;
				ring->enqueue_trb = saved_enqueue_trb;
				fibril_mutex_unlock(&ring->guard);
				return ENOTSUP;
			}
			*phys = trb_ring_enqueue_phys(ring);
		}

		ring->enqueue_trb++;

		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK)
			trb_ring_resolve_link(ring);

		if (trb_ring_enqueue_phys(ring) == ring->dequeue)
			goto err_again;
	}

	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;

	/*
	 * Now, copy the TRBs without further checking.
	 */
	trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
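		/*
		 * Stamp the TRB with the current producer cycle state and
		 * copy it into the ring, handing it over to the HC.
		 */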
		xhci_trb_set_cycle(trb, ring->pcs);
		xhci_trb_copy(ring->enqueue_trb, trb);

		usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb);
		ring->enqueue_trb++;

		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK) {
			// XXX: Check whether the order here is correct (ambiguous instructions in 4.11.5.1)
			xhci_trb_set_cycle(ring->enqueue_trb, ring->pcs);

			if (TRB_LINK_TC(*ring->enqueue_trb)) {
				ring->pcs = !ring->pcs;
				usb_log_debug2("TRB ring(%p): PCS toggled", ring);
			}

			trb_ring_resolve_link(ring);
		}
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;

err_again:
	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;
	fibril_mutex_unlock(&ring->guard);
	return EAGAIN;
}
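
/*
 * Calling convention implied above (the actual callers live elsewhere in the
 * driver): prepare the TD as a contiguous array of TRBs without Link TRBs,
 * let at most one TRB of it generate an interrupt (commands always do,
 * transfer TRBs only with IOC set), and retry on EAGAIN once the HC has
 * advanced the dequeue pointer.
 */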

/**
 * Enqueue TD composed of a single TRB. See: `xhci_trb_ring_enqueue_multiple`
 */
int xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td, uintptr_t *phys)
{
	return xhci_trb_ring_enqueue_multiple(ring, td, 1, phys);
}

/**
 * Initializes an event ring.
 * Even when it fails, the structure needs to be finalized.
 */
int xhci_event_ring_init(xhci_event_ring_t *ring)
{
	struct trb_segment *segment;
	int err;

	list_initialize(&ring->segments);

	if ((err = trb_segment_allocate(&segment)) != EOK)
		return err;

	list_append(&segment->segments_link, &ring->segments);
	ring->segment_count = 1;

	ring->dequeue_segment = segment;
	ring->dequeue_trb = segment_begin(segment);
	ring->dequeue_ptr = segment->phys;

	ring->erst = malloc32(PAGE_SIZE);
	if (ring->erst == NULL)
		return ENOMEM;
	memset(ring->erst, 0, PAGE_SIZE);

	xhci_fill_erst_entry(&ring->erst[0], segment->phys, SEGMENT_TRB_COUNT);

	ring->ccs = 1;

	fibril_mutex_initialize(&ring->guard);

	usb_log_debug("Initialized event ring.");

	return EOK;
}
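
/*
 * The single ERST entry filled above covers the ring's one segment. The HC
 * only learns about it once the interrupter's ERST registers are programmed,
 * which happens elsewhere in the driver, not in this file.
 */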

int xhci_event_ring_fini(xhci_event_ring_t *ring)
{
	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
		dmamem_unmap_anonymous(segment);
	}

	if (ring->erst)
		free32(ring->erst);

	return EOK;
}

static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring)
{
	uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment);
	return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Fill the event with the next valid event from the ring.
 *
 * @param event pointer to the event to be overwritten
 * @return EOK on success,
 *         ENOENT when the ring is empty
 */
int xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event)
{
	fibril_mutex_lock(&ring->guard);

	/*
	 * The ERDP reported to the HC is a half-phase off the one we need to
	 * maintain. Therefore, we keep ours separately.
	 */
	ring->dequeue_ptr = event_ring_dequeue_phys(ring);

	if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs) {
		fibril_mutex_unlock(&ring->guard);
		return ENOENT; /* The ring is empty. */
	}

	memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t));

	ring->dequeue_trb++;
	const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment);

	/* Wrapping around segment boundary */
	if (index >= SEGMENT_TRB_COUNT) {
		link_t *next_segment = list_next(&ring->dequeue_segment->segments_link, &ring->segments);

		/* Wrapping around table boundary */
		if (!next_segment) {
			next_segment = list_first(&ring->segments);
			ring->ccs = !ring->ccs;
		}

		ring->dequeue_segment = list_get_instance(next_segment, trb_segment_t, segments_link);
		ring->dequeue_trb = segment_begin(ring->dequeue_segment);
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;
}