source: mainline/uspace/drv/bus/usb/xhci/trb_ring.c@ef1a3a8

Last change on this file since ef1a3a8 was 9620a54, checked in by Petr Manek <petr.manek@…>, 8 years ago

Small changes. Temporarily fixed no device problem for endpoint logging. Added similar macro for device logging. Changed log messages to adopt these macros. TRB rings can be freed again. Made ring finalizers noexcept. Upon detach, the entire slot is disabled prior to unregistering endpoints in order to prevent invalid HC commands. Removed active endpoints count from XHCI device. Device context is freed in HC, so DCBAA is not touched from anywhere else.

/*
 * Copyright (c) 2017 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <assert.h>
#include <ddi.h>
#include <as.h>
#include <align.h>
#include <usb/debug.h>
#include <usb/host/utils/malloc32.h>
#include "hw_struct/trb.h"
#include "trb_ring.h"

#define SEGMENT_HEADER_SIZE (sizeof(link_t) + sizeof(uintptr_t))

/**
 * Number of TRBs in a segment (with our segment header taken into account).
 */
#define SEGMENT_TRB_COUNT ((PAGE_SIZE - SEGMENT_HEADER_SIZE) / sizeof(xhci_trb_t))

struct trb_segment {
	xhci_trb_t trb_storage[SEGMENT_TRB_COUNT];

	link_t segments_link;
	uintptr_t phys;
} __attribute__((aligned(PAGE_SIZE)));
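
/*
 * A compile-time sanity sketch: thanks to the page alignment, the TRB
 * storage plus the trailing header must fit into a single page.
 * (Assumes a C11 compiler for _Static_assert and that PAGE_SIZE is an
 * integer constant expression.)
 */
_Static_assert(sizeof(struct trb_segment) <= PAGE_SIZE,
    "struct trb_segment must fit in a single page");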

static inline xhci_trb_t *segment_begin(trb_segment_t *segment)
{
	return segment->trb_storage;
}

static inline xhci_trb_t *segment_end(trb_segment_t *segment)
{
	return segment_begin(segment) + SEGMENT_TRB_COUNT;
}

/**
 * Allocate and initialize a new segment.
 *
 * TODO: When the HC supports 64-bit addressing, there's no need to restrict
 * the mapping to DMAMEM_4GiB.
 */
static int trb_segment_allocate(trb_segment_t **segment)
{
	uintptr_t phys;
	int err;

	*segment = AS_AREA_ANY;
	err = dmamem_map_anonymous(PAGE_SIZE,
	    DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0, &phys,
	    (void *) segment);

	if (err == EOK) {
		memset(*segment, 0, PAGE_SIZE);
		(*segment)->phys = phys;

		usb_log_debug2("Allocated new ring segment.");
	}

	return err;
}

/**
 * Initializes the ring with one segment.
 * Even when it fails, the structure needs to be finalized.
 */
int xhci_trb_ring_init(xhci_trb_ring_t *ring)
{
	struct trb_segment *segment;
	int err;

	list_initialize(&ring->segments);

	if ((err = trb_segment_allocate(&segment)) != EOK)
		return err;

	list_append(&segment->segments_link, &ring->segments);
	ring->segment_count = 1;

	xhci_trb_t *last = segment_end(segment) - 1;
	xhci_trb_link_fill(last, segment->phys);
	TRB_LINK_SET_TC(*last, true);

	ring->enqueue_segment = segment;
	ring->enqueue_trb = segment_begin(segment);
	ring->dequeue = segment->phys;
	ring->pcs = 1;

	fibril_mutex_initialize(&ring->guard);

	usb_log_debug2("Initialized new TRB ring.");

	return EOK;
}
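
/*
 * A minimal usage sketch: as the comment above notes, the ring has to be
 * finalized even when its initialization fails.
 *
 *	xhci_trb_ring_t ring;
 *	const int err = xhci_trb_ring_init(&ring);
 *	if (err != EOK) {
 *		xhci_trb_ring_fini(&ring);
 *		return err;
 *	}
 */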

void xhci_trb_ring_fini(xhci_trb_ring_t *ring)
{
	assert(ring);

	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
		dmamem_unmap_anonymous(segment);
	}
}

/**
 * When the enqueue pointer targets a Link TRB, resolve it.
 *
 * Relies on segments being in the segment list in linked order.
 *
 * According to section 4.9.2.2, figure 16, Link TRBs cannot be chained, so
 * this function does not need to be called in a loop, nor does it need an
 * inner loop.
 */
static void trb_ring_resolve_link(xhci_trb_ring_t *ring)
{
	link_t *next_segment = list_next(&ring->enqueue_segment->segments_link, &ring->segments);
	if (!next_segment)
		next_segment = list_first(&ring->segments);

	ring->enqueue_segment = list_get_instance(next_segment, trb_segment_t, segments_link);
	ring->enqueue_trb = segment_begin(ring->enqueue_segment);
}

static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring)
{
	uintptr_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment);
	return ring->enqueue_segment->phys + trb_id * sizeof(xhci_trb_t);
}
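
/*
 * Worked example: with enqueue_segment->phys == 0x10000 and enqueue_trb
 * pointing at the third TRB of the segment, trb_id is 2 and the result is
 * 0x10000 + 2 * sizeof(xhci_trb_t) == 0x10020 (TRBs are 16 bytes long).
 */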

static bool trb_generates_interrupt(xhci_trb_t *trb)
{
	return TRB_TYPE(*trb) >= XHCI_TRB_TYPE_ENABLE_SLOT_CMD
	    || TRB_IOC(*trb);
}

/**
 * Enqueue TDs composed of TRBs.
 *
 * This copies the specified number of TRBs, chained together, into the ring.
 * The cycle flag in the TRBs may be changed.
 *
 * The copied TRBs must be contiguous in memory, and must not contain Link TRBs.
 *
 * The copying cannot be avoided, because each TRB in the ring has to be
 * updated atomically.
 *
 * @param first_trb the first TRB
 * @param trbs number of TRBs to enqueue
 * @param phys returns the physical address of the TRB that generates an
 *             interrupt (at most one TRB of the TD may do so)
 * @return EOK on success,
 *         EAGAIN when the ring is too full to fit all TRBs (temporary)
 */
int xhci_trb_ring_enqueue_multiple(xhci_trb_ring_t *ring, xhci_trb_t *first_trb,
    size_t trbs, uintptr_t *phys)
{
	assert(trbs > 0);
	fibril_mutex_lock(&ring->guard);

	xhci_trb_t * const saved_enqueue_trb = ring->enqueue_trb;
	trb_segment_t * const saved_enqueue_segment = ring->enqueue_segment;
	if (phys)
		*phys = (uintptr_t) NULL;

	/*
	 * First, dry run and advance the enqueue pointer to see if the ring would
	 * be full anytime during the transaction.
	 */
	xhci_trb_t *trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		if (phys && trb_generates_interrupt(trb)) {
			/* At most one TRB of the TD may generate an interrupt. */
			if (*phys) {
				ring->enqueue_segment = saved_enqueue_segment;
				ring->enqueue_trb = saved_enqueue_trb;
				fibril_mutex_unlock(&ring->guard);
				return ENOTSUP;
			}
			*phys = trb_ring_enqueue_phys(ring);
		}

		ring->enqueue_trb++;

		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK)
			trb_ring_resolve_link(ring);

		if (trb_ring_enqueue_phys(ring) == ring->dequeue)
			goto err_again;
	}

	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;

	/*
	 * Now, copy the TRBs without further checking.
	 */
	trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		TRB_SET_CYCLE(*trb, ring->pcs);
		xhci_trb_copy(ring->enqueue_trb, trb);

		usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb);
		ring->enqueue_trb++;

		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK) {
			TRB_SET_CYCLE(*ring->enqueue_trb, ring->pcs);

			if (TRB_LINK_TC(*ring->enqueue_trb)) {
				ring->pcs = !ring->pcs;
				usb_log_debug2("TRB ring(%p): PCS toggled", ring);
			}

			trb_ring_resolve_link(ring);
		}
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;

err_again:
	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;
	fibril_mutex_unlock(&ring->guard);
	return EAGAIN;
}

/**
 * Enqueue a TD composed of a single TRB. See: `xhci_trb_ring_enqueue_multiple`
 */
int xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td, uintptr_t *phys)
{
	return xhci_trb_ring_enqueue_multiple(ring, td, 1, phys);
}
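
/*
 * A caller sketch (the TRB setters live in hw_struct/trb.h; the exact
 * initialization is elided): build a TRB with the IOC flag set and submit
 * it as a single-TRB TD. EAGAIN only means the ring is momentarily full,
 * so the call may simply be retried later.
 *
 *	xhci_trb_t trb;
 *	memset(&trb, 0, sizeof(trb));
 *	// ... set the TRB type, parameters and the IOC flag here ...
 *	uintptr_t interrupt_trb_phys;
 *	if (xhci_trb_ring_enqueue(&ring, &trb, &interrupt_trb_phys) != EOK)
 *		return EAGAIN;
 */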

/**
 * Initializes an event ring.
 * Even when it fails, the structure needs to be finalized.
 */
int xhci_event_ring_init(xhci_event_ring_t *ring)
{
	struct trb_segment *segment;
	int err;

	list_initialize(&ring->segments);

	if ((err = trb_segment_allocate(&segment)) != EOK)
		return err;

	list_append(&segment->segments_link, &ring->segments);
	ring->segment_count = 1;

	ring->dequeue_segment = segment;
	ring->dequeue_trb = segment_begin(segment);
	ring->dequeue_ptr = segment->phys;

	ring->erst = malloc32(PAGE_SIZE);
	if (ring->erst == NULL)
		return ENOMEM;
	memset(ring->erst, 0, PAGE_SIZE);

	xhci_fill_erst_entry(&ring->erst[0], segment->phys, SEGMENT_TRB_COUNT);

	ring->ccs = 1;

	fibril_mutex_initialize(&ring->guard);

	usb_log_debug("Initialized event ring.");

	return EOK;
}

void xhci_event_ring_fini(xhci_event_ring_t *ring)
{
	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
		dmamem_unmap_anonymous(segment);
	}

	if (ring->erst)
		free32(ring->erst);
}

static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring)
{
	uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment);
	return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Fill the event with the next valid event from the ring.
 *
 * @param event pointer to the event to be overwritten
 * @return EOK on success,
 *         ENOENT when the ring is empty
 */
int xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event)
{
	fibril_mutex_lock(&ring->guard);

	/*
	 * The ERDP reported to the HC is half a phase off the one we need to
	 * maintain, so we keep our own copy.
	 */
	ring->dequeue_ptr = event_ring_dequeue_phys(ring);

	if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs) {
		fibril_mutex_unlock(&ring->guard);
		return ENOENT; /* The ring is empty. */
	}

	memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t));

	ring->dequeue_trb++;
	const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment);

	/* Wrapping around the segment boundary */
	if (index >= SEGMENT_TRB_COUNT) {
		link_t *next_segment = list_next(&ring->dequeue_segment->segments_link, &ring->segments);

		/* Wrapping around the table boundary */
		if (!next_segment) {
			next_segment = list_first(&ring->segments);
			ring->ccs = !ring->ccs;
		}

		ring->dequeue_segment = list_get_instance(next_segment, trb_segment_t, segments_link);
		ring->dequeue_trb = segment_begin(ring->dequeue_segment);
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;
}
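
/*
 * A drain-loop sketch (handle_event is a hypothetical handler): dequeue
 * events until the ring reports ENOENT, i.e. until it is empty.
 *
 *	xhci_trb_t event;
 *	while (xhci_event_ring_dequeue(&ring, &event) == EOK)
 *		handle_event(&event);
 */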