source: mainline/uspace/drv/bus/usb/xhci/trb_ring.c@ 9af3281

Last change on this file was 9af3281, checked in by Jaroslav Jindrak <dzejrou@…>, 8 years ago

Added initialization of allocated ERST segments; previously, garbage data caused an invalid segment size to be passed to the xHC, which as a result collapsed and did not respond to commands.

File size: 8.6 KB
/*
 * Copyright (c) 2017 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <assert.h>
#include <ddi.h>
#include <as.h>
#include <align.h>
#include <usb/debug.h>
#include <usb/host/utils/malloc32.h>
#include "hw_struct/trb.h"
#include "trb_ring.h"

#define SEGMENT_HEADER_SIZE (sizeof(link_t) + sizeof(uintptr_t))

/**
 * Number of TRBs in a segment (with our header).
 */
#define SEGMENT_TRB_COUNT ((PAGE_SIZE - SEGMENT_HEADER_SIZE) / sizeof(xhci_trb_t))

struct trb_segment {
    xhci_trb_t trb_storage [SEGMENT_TRB_COUNT];

    link_t segments_link;
    uintptr_t phys;
} __attribute__((aligned(PAGE_SIZE)));

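/*
 * Layout note: a segment occupies exactly one page-aligned page. The TRB
 * storage fills the page from the start and the bookkeeping fields (the list
 * link and the cached physical address) follow it within the same page, which
 * is why SEGMENT_TRB_COUNT subtracts SEGMENT_HEADER_SIZE from PAGE_SIZE. With
 * the usual 4 KiB pages and 16-byte TRBs this works out to roughly 254 to 255
 * TRBs per segment, depending on pointer size.
 */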

static inline xhci_trb_t *segment_begin(trb_segment_t *segment)
{
    return segment->trb_storage;
}

static inline xhci_trb_t *segment_end(trb_segment_t *segment)
{
    return segment_begin(segment) + SEGMENT_TRB_COUNT;
}

/**
 * Allocate and initialize new segment.
 *
 * TODO: When the HC supports 64-bit addressing, there's no need to restrict
 * to DMAMEM_4GiB.
 */
static int trb_segment_allocate(trb_segment_t **segment)
{
    uintptr_t phys;
    int err;

    *segment = AS_AREA_ANY;
    err = dmamem_map_anonymous(PAGE_SIZE,
        DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0, &phys,
        (void *) segment);

    if (err == EOK) {
        memset(*segment, 0, PAGE_SIZE);
        (*segment)->phys = phys;

        usb_log_debug2("Allocated new ring segment.");
    }

    return err;
}
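
/*
 * Note on the allocation above: each segment is a single page-aligned page,
 * which keeps the TRB storage physically contiguous and (with 4 KiB pages)
 * well clear of the 64 KiB boundary that the xHCI specification does not
 * allow a ring segment to cross. The DMAMEM_4GiB constraint matches the TODO
 * above: it is only required as long as the HC's 64-bit addressing capability
 * is not relied upon.
 */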

/**
 * Initializes the ring with one segment.
 * Even when it fails, the structure needs to be finalized.
 */
int xhci_trb_ring_init(xhci_trb_ring_t *ring, xhci_hc_t *hc)
{
    struct trb_segment *segment;
    int err;

    list_initialize(&ring->segments);

    if ((err = trb_segment_allocate(&segment)) != EOK)
        return err;

    list_append(&segment->segments_link, &ring->segments);
    ring->segment_count = 1;

    xhci_trb_t *last = segment_end(segment) - 1;
    xhci_trb_link_fill(last, segment->phys);
    xhci_trb_set_cycle(last, true);

    ring->enqueue_segment = segment;
    ring->enqueue_trb = segment_begin(segment);
    ring->dequeue = segment->phys;
    ring->pcs = 1;

    usb_log_debug("Initialized new TRB ring.");

    return EOK;
}
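
/*
 * Illustrative only: a sketch of how a caller might use this ring (hc and td
 * are placeholders for the caller's HC handle and a prepared TD; not code
 * from this driver):
 *
 *     xhci_trb_ring_t ring;
 *     int err = xhci_trb_ring_init(&ring, hc);
 *     if (err != EOK) {
 *         xhci_trb_ring_fini(&ring);   // needed even after a failed init
 *         return err;
 *     }
 *     ...
 *     xhci_trb_ring_enqueue(&ring, td);   // td: first TRB of a chained TD
 *     ...
 *     xhci_trb_ring_fini(&ring);
 */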

int xhci_trb_ring_fini(xhci_trb_ring_t *ring)
{
    list_foreach(ring->segments, segments_link, trb_segment_t, segment)
        dmamem_unmap_anonymous(segment);
    return EOK;
}

/**
 * When the enqueue pointer targets a Link TRB, resolve it.
 *
 * Relies on segments being in the segment list in linked order.
 *
 * According to section 4.9.2.2, figure 16, Link TRBs cannot be chained, so
 * this does not need to be called in a loop, nor does it need an inner loop
 * of its own.
 */
static void trb_ring_resolve_link(xhci_trb_ring_t *ring)
{
    link_t *next_segment = list_next(&ring->enqueue_segment->segments_link, &ring->segments);
    if (!next_segment)
        next_segment = list_first(&ring->segments);

    ring->enqueue_segment = list_get_instance(next_segment, trb_segment_t, segments_link);
    ring->enqueue_trb = segment_begin(ring->enqueue_segment);
}

static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring)
{
    uintptr_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment);
    return ring->enqueue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Enqueue a TD composed of TRBs.
 *
 * This will copy all TRBs chained together into the ring. The cycle flag of
 * the TRBs may be changed in the process.
 *
 * The chained TRBs must be contiguous in memory and must not contain Link TRBs.
 *
 * The copying cannot be avoided, because each TRB in the ring has to be
 * updated atomically.
 *
 * @param td the first TRB of the TD
 * @return EOK on success,
 *         EAGAIN when the ring is temporarily too full to fit all TRBs
 */
int xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td)
{
    xhci_trb_t * const saved_enqueue_trb = ring->enqueue_trb;
    trb_segment_t * const saved_enqueue_segment = ring->enqueue_segment;

    /*
     * First, dry run and advance the enqueue pointer to see if the ring would
     * be full anytime during the transaction.
     */
    xhci_trb_t *trb = td;
    do {
        ring->enqueue_trb++;

        if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK)
            trb_ring_resolve_link(ring);

        if (trb_ring_enqueue_phys(ring) == ring->dequeue)
            goto err_again;
    } while (xhci_trb_is_chained(trb++));

    ring->enqueue_segment = saved_enqueue_segment;
    ring->enqueue_trb = saved_enqueue_trb;

    /*
     * Now, copy the TRBs without further checking.
     */
    trb = td;
    do {
        xhci_trb_set_cycle(trb, ring->pcs);
        xhci_trb_copy(ring->enqueue_trb, trb);

        usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb);
        ring->enqueue_trb++;

        if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK) {
            // XXX: Check whether the order here is correct (ambiguous instructions in 4.11.5.1)
            xhci_trb_set_cycle(ring->enqueue_trb, ring->pcs);

            if (TRB_LINK_TC(*ring->enqueue_trb)) {
                ring->pcs = !ring->pcs;
                usb_log_debug2("TRB ring(%p): PCS toggled", ring);
            }

            trb_ring_resolve_link(ring);
        }
    } while (xhci_trb_is_chained(trb++));

    return EOK;

err_again:
    ring->enqueue_segment = saved_enqueue_segment;
    ring->enqueue_trb = saved_enqueue_trb;
    return EAGAIN;
}
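
/*
 * Cycle bit note (cf. xHCI spec, section 4.9.2): TRBs are written into the
 * ring with their cycle bit equal to the producer cycle state (pcs), and the
 * HC treats a TRB as valid only when its cycle bit matches the HC's own
 * consumer cycle state. Toggling pcs whenever a Link TRB with the Toggle
 * Cycle flag is crossed keeps this invariant intact across ring wrap-around.
 */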

/**
 * Initializes an event ring.
 * Even when it fails, the structure needs to be finalized.
 */
int xhci_event_ring_init(xhci_event_ring_t *ring, xhci_hc_t *hc)
{
    struct trb_segment *segment;
    int err;

    list_initialize(&ring->segments);

    if ((err = trb_segment_allocate(&segment)) != EOK)
        return err;

    list_append(&segment->segments_link, &ring->segments);
    ring->segment_count = 1;

    ring->dequeue_segment = segment;
    ring->dequeue_trb = segment_begin(segment);
    ring->dequeue_ptr = segment->phys;

    ring->erst = malloc32(PAGE_SIZE);
    if (ring->erst == NULL)
        return ENOMEM;
    memset(ring->erst, 0, PAGE_SIZE);

    xhci_fill_erst_entry(&ring->erst[0], segment->phys, SEGMENT_TRB_COUNT);

    ring->ccs = 1;

    usb_log_debug("Initialized event ring.");

    return EOK;
}
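
/*
 * ring->erst is the Event Ring Segment Table handed to the xHC; only entry 0
 * is filled in and it describes our single segment (base segment->phys, size
 * SEGMENT_TRB_COUNT TRBs). Zeroing the whole page first matters: handing the
 * controller uninitialized ERST memory previously caused an invalid segment
 * size to be read by the xHC (see the commit message of this revision).
 */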

int xhci_event_ring_fini(xhci_event_ring_t *ring)
{
    list_foreach(ring->segments, segments_link, trb_segment_t, segment)
        dmamem_unmap_anonymous(segment);

    if (ring->erst)
        free32(ring->erst);

    return EOK;
}

static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring)
{
    uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment);
    return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Fill the event with the next valid event from the ring.
 *
 * @param event pointer to the event to be overwritten
 * @return EOK on success,
 *         ENOENT when the ring is empty
 */
int xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event)
{
    /*
     * The ERDP reported to the HC is a half-phase off the one we need to
     * maintain, so we keep an extra copy here.
     */
    ring->dequeue_ptr = event_ring_dequeue_phys(ring);

    if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs)
        return ENOENT; /* The ring is empty. */

    memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t));

    ring->dequeue_trb++;
    const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment);

    /* Wrapping around segment boundary */
    if (index >= SEGMENT_TRB_COUNT) {
        link_t *next_segment = list_next(&ring->dequeue_segment->segments_link, &ring->segments);

        /* Wrapping around table boundary */
        if (!next_segment) {
            next_segment = list_first(&ring->segments);
            ring->ccs = !ring->ccs;
        }

        ring->dequeue_segment = list_get_instance(next_segment, trb_segment_t, segments_link);
        ring->dequeue_trb = segment_begin(ring->dequeue_segment);
    }

    return EOK;
}
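
/*
 * Consumer cycle state note: on the event ring the xHC is the producer and
 * writes each event TRB with its current cycle bit, so a TRB whose cycle bit
 * differs from our ccs is just a stale, already-consumed entry; that is what
 * the emptiness test above relies on. The ccs is toggled whenever the dequeue
 * pointer wraps past the last segment of the table, mirroring the toggle the
 * producer performs on its side.
 */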