Open FFBoard
Open source force feedback firmware
hcd_dwc2.c
Go to the documentation of this file.
1/*
2 * The MIT License (MIT)
3 *
4 * Copyright (c) 2024 Ha Thach (tinyusb.org)
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 *
24 * This file is part of the TinyUSB stack.
25 */
26
27#include "tusb_option.h"
28
29#if CFG_TUH_ENABLED && defined(TUP_USBIP_DWC2)
30
31// Debug level for DWC2
32#define DWC2_DEBUG 2
33
34#include "host/hcd.h"
35#include "dwc2_common.h"
36
37// Max number of endpoints application can open, can be larger than DWC2_CHANNEL_COUNT_MAX
38#ifndef CFG_TUH_DWC2_ENDPOINT_MAX
39#define CFG_TUH_DWC2_ENDPOINT_MAX 16
40#endif
41
42#define DWC2_CHANNEL_COUNT_MAX 16 // absolute max channel count
43#define DWC2_CHANNEL_COUNT(_dwc2) tu_min8((_dwc2)->ghwcfg2_bm.num_host_ch + 1, DWC2_CHANNEL_COUNT_MAX)
44
45TU_VERIFY_STATIC(CFG_TUH_DWC2_ENDPOINT_MAX <= 255, "currently only use 8-bit for index");
46
// HPRT contains several status bits that are cleared by writing 1 to them.
// This mask collects those bits so read-modify-write sequences on HPRT
// (see hcd_port_reset()/hcd_port_reset_end()) can strip them out and avoid
// clearing a pending status by accident.
enum {
  HPRT_W1_MASK = HPRT_CONN_DETECT | HPRT_ENABLE | HPRT_ENABLE_CHANGE | HPRT_OVER_CURRENT_CHANGE | HPRT_SUSPEND
};
50
51enum {
53};
54
55enum {
57};
58
59//--------------------------------------------------------------------
60//
61//--------------------------------------------------------------------
62
63// Host driver struct for each opened endpoint
64typedef struct {
65 union {
66 uint32_t hcchar;
68 };
69 union {
70 uint32_t hcsplt;
72 };
73
75 uint32_t uframe_interval : 18; // micro-frame interval
76 uint32_t speed : 2;
77 uint32_t next_pid : 2;
78 uint32_t do_ping : 1;
79 // uint32_t : 9;
80 };
81
82 uint32_t uframe_countdown; // micro-frame count down to transfer for periodic, only need 18-bit
83
84 uint8_t* buffer;
85 uint16_t buflen;
87
88// Additional info for each channel when it is active
89typedef struct {
90 volatile bool allocated;
91 uint8_t ep_id;
93 uint8_t err_count : 3;
95 uint8_t halted_nyet : 1;
97 };
98 uint8_t result;
99
100 uint16_t xferred_bytes; // bytes that accumulate transferred though USB bus for the whole hcd_edpt_xfer(), which can
101 // be composed of multiple channel_xfer_start() (retry with NAK/NYET)
102 uint16_t fifo_bytes; // bytes written/read from/to FIFO (may not be transferred on USB bus).
103} hcd_xfer_t;
104
// Top-level host driver state: active-transfer bookkeeping per hardware
// channel, plus the configuration of every endpoint opened via hcd_edpt_open().
typedef struct {
  hcd_xfer_t xfer[DWC2_CHANNEL_COUNT_MAX];        // indexed by hardware channel id
  hcd_endpoint_t edpt[CFG_TUH_DWC2_ENDPOINT_MAX]; // indexed by driver endpoint id
} hcd_data_t;
109
111
112//--------------------------------------------------------------------
113//
114//--------------------------------------------------------------------
115TU_ATTR_ALWAYS_INLINE static inline tusb_speed_t hprt_speed_get(dwc2_regs_t* dwc2) {
116 tusb_speed_t speed;
117 switch(dwc2->hprt_bm.speed) {
118 case HPRT_SPEED_HIGH: speed = TUSB_SPEED_HIGH; break;
119 case HPRT_SPEED_FULL: speed = TUSB_SPEED_FULL; break;
120 case HPRT_SPEED_LOW : speed = TUSB_SPEED_LOW ; break;
121 default:
122 speed = TUSB_SPEED_INVALID;
123 TU_BREAKPOINT();
124 break;
125 }
126 return speed;
127}
128
// True when host transfers should use the core's DMA engine.
// Requires both the compile-time option and an internal-DMA capable core
// (GHWCFG2 architecture field); everything else falls back to slave mode.
TU_ATTR_ALWAYS_INLINE static inline bool dma_host_enabled(const dwc2_regs_t* dwc2) {
  (void) dwc2; // NOTE(review): redundant since dwc2 is referenced below -- presumably kept for configs where the macro is 0
  // Internal DMA only
  return CFG_TUH_DWC2_DMA_ENABLE && dwc2->ghwcfg2_bm.arch == GHWCFG2_ARCH_INTERNAL_DMA;
}
134
135// Allocate a channel for new transfer
136TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_alloc(dwc2_regs_t* dwc2) {
137 const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
138 for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
139 hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
140 if (!xfer->allocated) {
141 tu_memclr(xfer, sizeof(hcd_xfer_t));
142 xfer->allocated = true;
143 return ch_id;
144 }
145 }
147}
148
149// Check if is periodic (interrupt/isochronous)
150TU_ATTR_ALWAYS_INLINE static inline bool edpt_is_periodic(uint8_t ep_type) {
151 return ep_type == HCCHAR_EPTYPE_INTERRUPT || ep_type == HCCHAR_EPTYPE_ISOCHRONOUS;
152}
153
154TU_ATTR_ALWAYS_INLINE static inline uint8_t req_queue_avail(const dwc2_regs_t* dwc2, bool is_period) {
155 if (is_period) {
156 return dwc2->hptxsts_bm.req_queue_available;
157 } else {
158 return dwc2->hnptxsts_bm.req_queue_available;
159 }
160}
161
// Return a channel to the free pool and mask its per-channel interrupt
// so no further HAINT events are delivered for it.
TU_ATTR_ALWAYS_INLINE static inline void channel_dealloc(dwc2_regs_t* dwc2, uint8_t ch_id) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  xfer->allocated = false;
  dwc2->haintmsk &= ~TU_BIT(ch_id); // stop routing this channel's interrupts
}
167
// Ask the core to halt a channel. The halt is asynchronous: completion is
// reported via the HALTED channel interrupt, which is unmasked here.
// Returns false (through TU_ASSERT) when the request queue has no space.
TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
  // disable also require request queue
  TU_ASSERT(req_queue_avail(dwc2, edpt_is_periodic(channel->hcchar_bm.ep_type)));
  channel->hcintmsk |= HCINT_HALTED;
  channel->hcchar |= HCCHAR_CHDIS | HCCHAR_CHENA; // must set both CHDIS and CHENA
  return true;
}

// attempt to send IN token to receive data
// Enabling the channel queues an IN token request; requires request queue space.
TU_ATTR_ALWAYS_INLINE static inline bool channel_send_in_token(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
  TU_ASSERT(req_queue_avail(dwc2, edpt_is_periodic(channel->hcchar_bm.ep_type)));
  channel->hcchar |= HCCHAR_CHENA;
  return true;
}
182
183// Find currently enabled channel. Note: EP0 is bidirectional
184TU_ATTR_ALWAYS_INLINE static inline uint8_t channel_find_enabled(dwc2_regs_t* dwc2, uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir) {
185 const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
186 for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
187 if (_hcd_data.xfer[ch_id].allocated) {
188 const dwc2_channel_char_t hcchar_bm = dwc2->channel[ch_id].hcchar_bm;
189 if (hcchar_bm.dev_addr == dev_addr && hcchar_bm.ep_num == ep_num && (ep_num == 0 || hcchar_bm.ep_dir == ep_dir)) {
190 return ch_id;
191 }
192 }
193 }
195}
196
197
198// Allocate a new endpoint
199TU_ATTR_ALWAYS_INLINE static inline uint8_t edpt_alloc(void) {
200 for (uint32_t i = 0; i < CFG_TUH_DWC2_ENDPOINT_MAX; i++) {
201 hcd_endpoint_t* edpt = &_hcd_data.edpt[i];
202 if (edpt->hcchar_bm.enable == 0) {
203 tu_memclr(edpt, sizeof(hcd_endpoint_t));
204 edpt->hcchar_bm.enable = 1;
205 return i;
206 }
207 }
209}
210
211// Find a endpoint that is opened previously with hcd_edpt_open()
212// Note: EP0 is bidirectional
213TU_ATTR_ALWAYS_INLINE static inline uint8_t edpt_find_opened(uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir) {
214 for (uint8_t i = 0; i < (uint8_t)CFG_TUH_DWC2_ENDPOINT_MAX; i++) {
215 const dwc2_channel_char_t* hcchar_bm = &_hcd_data.edpt[i].hcchar_bm;
216 if (hcchar_bm->enable && hcchar_bm->dev_addr == dev_addr &&
217 hcchar_bm->ep_num == ep_num && (ep_num == 0 || hcchar_bm->ep_dir == ep_dir)) {
218 return i;
219 }
220 }
222}
223
224TU_ATTR_ALWAYS_INLINE static inline uint16_t cal_packet_count(uint16_t len, uint16_t ep_size) {
225 if (len == 0) {
226 return 1;
227 } else {
228 return tu_div_ceil(len, ep_size);
229 }
230}
231
232TU_ATTR_ALWAYS_INLINE static inline uint8_t cal_next_pid(uint8_t pid, uint8_t packet_count) {
233 if (packet_count & 0x01) {
234 return pid ^ 0x02; // toggle DATA0 and DATA1
235 } else {
236 return pid;
237 }
238}
239
240//--------------------------------------------------------------------
241//
242//--------------------------------------------------------------------
243
244/* USB Data FIFO Layout
245
246 The FIFO is split up into
247 - EPInfo: for storing DMA metadata (check dcd_dwc2.c for more details)
248 - 1 RX FIFO: for receiving data
249 - 1 TX FIFO for non-periodic (NPTX)
250 - 1 TX FIFO for periodic (PTX)
251
252 We allocated TX FIFO from top to bottom (using top pointer), this to allow the RX FIFO to grow dynamically which is
253 possible since the free space is located between the RX and TX FIFOs.
254
255 ----------------- ep_fifo_size
256 | HCDMAn |
257 |--------------|-- gdfifocfg.EPINFOBASE (max is ghwcfg3.dfifo_depth)
258 | Non-Periodic |
259 | TX FIFO |
260 |--------------|--- GNPTXFSIZ.addr (fixed size)
261 | Periodic |
262 | TX FIFO |
263 |--------------|--- HPTXFSIZ.addr (expandable downward)
264 | FREE |
265 | |
266 |--------------|-- GRXFSIZ (expandable upward)
267 | RX FIFO |
268 ---------------- 0
269*/
270
271/* Programming Guide 2.1.2 FIFO RAM allocation
272 * RX
273 * - Largest-EPsize/4 + 2 (status info). recommended x2 if high bandwidth or multiple ISO are used.
274 * - 2 for transfer complete and channel halted status
275 * - 1 for each Control/Bulk out endpoint to Handle NAK/NYET (i.e max is number of host channel)
276 *
277 * TX non-periodic (NPTX)
278 * - At least largest-EPsize/4, recommended x2
279 *
280 * TX periodic (PTX)
281 * - At least largest-EPsize*MulCount/4 (MulCount up to 3 for high-bandwidth ISO/interrupt)
282*/
// Partition the shared data-FIFO RAM into RX, non-periodic TX and periodic TX
// regions (all sizes/addresses in 32-bit words). See the layout diagram and
// the Programming Guide notes above for the sizing rationale.
static void dfifo_host_init(uint8_t rhport) {
  const dwc2_controller_t* dwc2_controller = &_dwc2_controller[rhport];
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);

  // Scatter/Gather DMA mode is not yet supported. Buffer DMA only need 1 words per channel
  const bool is_dma = dma_host_enabled(dwc2);
  uint16_t dfifo_top = dwc2_controller->ep_fifo_size/4;
  if (is_dma) {
    dfifo_top -= dwc2->ghwcfg2_bm.num_host_ch; // reserve 1 word per channel for DMA metadata
  }

  // fixed allocation for now, improve later:
  // - ptx_largest is limited to 256 for FS since most FS core only has 1024 bytes total
  bool is_highspeed = dwc2_core_is_highspeed(dwc2, TUSB_ROLE_HOST);
  uint32_t nptx_largest = is_highspeed ? TUSB_EPSIZE_BULK_HS/4 : TUSB_EPSIZE_BULK_FS/4;
  uint32_t ptx_largest = is_highspeed ? TUSB_EPSIZE_ISO_HS_MAX/4 : 256/4;

  uint16_t nptxfsiz = 2 * nptx_largest;                                     // recommended 2x largest packet
  uint16_t rxfsiz = 2 * (ptx_largest + 2) + dwc2->ghwcfg2_bm.num_host_ch;   // + status words + 1/channel for NAK/NYET
  TU_ASSERT(dfifo_top >= (nptxfsiz + rxfsiz),); // not enough FIFO RAM for this scheme
  uint16_t ptxfsiz = dfifo_top - (nptxfsiz + rxfsiz); // periodic TX takes all remaining space

  dwc2->gdfifocfg = (dfifo_top << GDFIFOCFG_EPINFOBASE_SHIFT) | dfifo_top;

  // Carve regions downward from the top. GRXFSIZ holds only a size; the TX
  // FIFO registers pack (size in high half, start address in low half).
  dfifo_top -= rxfsiz;
  dwc2->grxfsiz = rxfsiz;

  dfifo_top -= nptxfsiz;
  dwc2->gnptxfsiz = tu_u32_from_u16(nptxfsiz, dfifo_top);

  dfifo_top -= ptxfsiz;
  dwc2->hptxfsiz = tu_u32_from_u16(ptxfsiz, dfifo_top);
}
316
317//--------------------------------------------------------------------+
318// Controller API
319//--------------------------------------------------------------------+
320
// Optional HCD configuration hook, called by tuh_configure().
// The DWC2 driver currently has no runtime-configurable options, so every
// request is accepted and ignored.
bool hcd_configure(uint8_t rhport, uint32_t cfg_id, const void* cfg_param) {
  (void) cfg_param;
  (void) cfg_id;
  (void) rhport;
  return true;
}
329
// Initialize controller to host mode
// Sequence: core init -> DMA/slave selection -> host config -> force host
// mode -> FIFO layout -> port power -> interrupt enable. Ordering matters:
// several registers are only safely written after the core reset inside
// dwc2_core_init() and after the mode switch completes.
bool hcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
  (void) rh_init;
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);

  tu_memclr(&_hcd_data, sizeof(_hcd_data));

  // Core Initialization
  const bool is_highspeed = dwc2_core_is_highspeed(dwc2, TUSB_ROLE_HOST);
  TU_ASSERT(dwc2_core_init(rhport, is_highspeed));

  if (dma_host_enabled(dwc2)) {
    // DMA seems to be only settable after a core reset, and not possible to switch on-the-fly
    dwc2->gahbcfg |= GAHBCFG_DMAEN | GAHBCFG_HBSTLEN_2;
  } else {
    dwc2->gintmsk |= GINTSTS_RXFLVL; // slave mode: take an interrupt per RX FIFO entry
  }

  //------------- 3.1 Host Initialization -------------//

  // work at max supported speed
  dwc2->hcfg &= ~HCFG_FSLS_ONLY;

  // Enable HFIR reload
  if (dwc2->gsnpsid >= DWC2_CORE_REV_2_92a) {
    dwc2->hfir |= HFIR_RELOAD_CTRL;
  }

  // force host mode and wait for mode switch
  dwc2->gusbcfg = (dwc2->gusbcfg & ~GUSBCFG_FDMOD) | GUSBCFG_FHMOD;
  while ((dwc2->gintsts & GINTSTS_CMOD) != GINTSTS_CMODE_HOST) {}

  // configure fixed-allocated fifo scheme
  dfifo_host_init(rhport);

  dwc2->hprt = HPRT_W1_MASK; // clear all write-1-clear bits
  dwc2->hprt = HPRT_POWER; // turn on VBUS

  // Enable required interrupts
  dwc2->gintmsk |= GINTSTS_OTGINT | GINTSTS_CONIDSTSCHNG | GINTSTS_HPRTINT | GINTSTS_HCINT;

  // NPTX can hold at least 2 packet, change interrupt level to half-empty
  uint32_t gahbcfg = dwc2->gahbcfg & ~GAHBCFG_TX_FIFO_EPMTY_LVL;
  gahbcfg |= GAHBCFG_GINT; // Enable global interrupt
  dwc2->gahbcfg = gahbcfg;

  return true;
}
378
// Enable USB interrupt
// Thin wrapper: routes to the shared DWC2 interrupt control with the host role.
void hcd_int_enable (uint8_t rhport) {
  dwc2_int_set(rhport, TUSB_ROLE_HOST, true);
}

// Disable USB interrupt
void hcd_int_disable(uint8_t rhport) {
  dwc2_int_set(rhport, TUSB_ROLE_HOST, false);
}

// Get frame number (1ms)
// Reads the frame counter field from HFNUM.
uint32_t hcd_frame_number(uint8_t rhport) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  return dwc2->hfnum & HFNUM_FRNUM_Msk;
}
394
395//--------------------------------------------------------------------+
396// Port API
397//--------------------------------------------------------------------+
398
// Get the current connect status of roothub port
bool hcd_port_connect_status(uint8_t rhport) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  return dwc2->hprt & HPRT_CONN_STATUS;
}

// Reset USB bus on the port. Return immediately, bus reset sequence may not be complete.
// Some port would require hcd_port_reset_end() to be invoked after 10ms to complete the reset sequence.
void hcd_port_reset(uint8_t rhport) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  // mask out write-1-clear bits so this read-modify-write does not clear pending status
  uint32_t hprt = dwc2->hprt & ~HPRT_W1_MASK;
  hprt |= HPRT_RESET; // assert bus reset; de-asserted by hcd_port_reset_end()
  dwc2->hprt = hprt;
}

// Complete bus reset sequence, may be required by some controllers
void hcd_port_reset_end(uint8_t rhport) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  uint32_t hprt = dwc2->hprt & ~HPRT_W1_MASK; // skip w1c bits
  hprt &= ~HPRT_RESET; // release the bus reset
  dwc2->hprt = hprt;
}
421
422// Get port link speed
424 dwc2_regs_t* dwc2 = DWC2_REG(rhport);
425 const tusb_speed_t speed = hprt_speed_get(dwc2);
426 return speed;
427}
428
429// HCD closes all opened endpoints belong to this device
430void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
431 (void) rhport;
432 for (uint8_t i = 0; i < (uint8_t) CFG_TUH_DWC2_ENDPOINT_MAX; i++) {
433 hcd_endpoint_t* edpt = &_hcd_data.edpt[i];
434 if (edpt->hcchar_bm.enable && edpt->hcchar_bm.dev_addr == dev_addr) {
435 tu_memclr(edpt, sizeof(hcd_endpoint_t));
436 }
437 }
438}
439
440//--------------------------------------------------------------------+
441// Endpoints API
442//--------------------------------------------------------------------+
443
// Open an endpoint
// Allocates an endpoint slot and pre-computes the channel characteristics
// (HCCHAR), split settings (HCSPLT) and the micro-frame scheduling interval
// used later by channel_xfer_start().
bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t* desc_ep) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  const tusb_speed_t rh_speed = hprt_speed_get(dwc2);

  hcd_devtree_info_t devtree_info;
  hcd_devtree_get_info(dev_addr, &devtree_info);

  // find a free endpoint
  const uint8_t ep_id = edpt_alloc();
  TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
  hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];

  dwc2_channel_char_t* hcchar_bm = &edpt->hcchar_bm;
  hcchar_bm->ep_size = tu_edpt_packet_size(desc_ep);
  hcchar_bm->ep_num = tu_edpt_number(desc_ep->bEndpointAddress);
  hcchar_bm->ep_dir = tu_edpt_dir(desc_ep->bEndpointAddress);
  hcchar_bm->low_speed_dev = (devtree_info.speed == TUSB_SPEED_LOW) ? 1 : 0;
  hcchar_bm->ep_type = desc_ep->bmAttributes.xfer; // ep_type matches TUSB_XFER_*
  hcchar_bm->err_multi_count = 0;
  hcchar_bm->dev_addr = dev_addr;
  hcchar_bm->odd_frame = 0; // recomputed per transfer for periodic endpoints
  hcchar_bm->disable = 0;
  hcchar_bm->enable = 1;

  dwc2_channel_split_t* hcsplt_bm = &edpt->hcsplt_bm;
  hcsplt_bm->hub_port = devtree_info.hub_port;
  hcsplt_bm->hub_addr = devtree_info.hub_addr;
  hcsplt_bm->xact_pos = 0;
  hcsplt_bm->split_compl = 0;
  // split transactions are needed when a FS/LS device sits behind a HS root port
  hcsplt_bm->split_en = (rh_speed == TUSB_SPEED_HIGH && devtree_info.speed != TUSB_SPEED_HIGH) ? 1 : 0;

  edpt->speed = devtree_info.speed;
  edpt->next_pid = HCTSIZ_PID_DATA0;
  if (desc_ep->bmAttributes.xfer == TUSB_XFER_ISOCHRONOUS) {
    // ISO: bInterval is an exponent, interval = 2^(bInterval-1)
    edpt->uframe_interval = 1 << (desc_ep->bInterval - 1);
    if (devtree_info.speed == TUSB_SPEED_FULL) {
      edpt->uframe_interval <<= 3; // FS counts frames; convert to micro-frames (x8)
    }
  } else if (desc_ep->bmAttributes.xfer == TUSB_XFER_INTERRUPT) {
    if (devtree_info.speed == TUSB_SPEED_HIGH) {
      edpt->uframe_interval = 1 << (desc_ep->bInterval - 1); // HS: exponent encoding
    } else {
      edpt->uframe_interval = desc_ep->bInterval << 3; // FS/LS: bInterval is in frames
    }
  }

  return true;
}
493
// clean up channel after part of transfer is done but the whole urb is not complete:
// save the data toggle, account the bytes actually accepted on the bus and
// advance the endpoint's buffer/length so a later channel_xfer_start() resumes
// from the right position.
static void channel_xfer_out_wrapup(dwc2_regs_t* dwc2, uint8_t ch_id) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];

  edpt->next_pid = channel->hctsiz_bm.pid; // save PID

  /* Since hctsiz.xfersize field reflects the number of bytes transferred via the AHB, not the USB)
   * For IN: we can use hctsiz.xfersize as remaining bytes.
   * For OUT: Must use the hctsiz.pktcnt field to determine how much data has been transferred. This field reflects the
   * number of packets that have been transferred via the USB. This is always an integral number of packets if the
   * transfer was halted before its normal completion.
   */
  const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
  const uint16_t total_packets = cal_packet_count(edpt->buflen, channel->hcchar_bm.ep_size);
  const uint16_t actual_bytes = (total_packets - remain_packets) * channel->hcchar_bm.ep_size;

  xfer->fifo_bytes = 0; // FIFO-level progress is void once the channel stops
  xfer->xferred_bytes += actual_bytes;
  edpt->buffer += actual_bytes;
  edpt->buflen -= actual_bytes;
}
517
// Program a channel from the endpoint's saved state and start (part of) a
// transfer: restores HCCHAR/HCSPLT, computes HCTSIZ (PID, packet count, size),
// then arms the channel either via DMA or slave-mode FIFO interrupts.
static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
  dwc2_channel_char_t* hcchar_bm = &edpt->hcchar_bm;
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  bool const is_period = edpt_is_periodic(hcchar_bm->ep_type);

  // clear previous state
  xfer->fifo_bytes = 0;

  // hchar: restore but don't enable yet
  if (is_period) {
    hcchar_bm->odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
  }
  channel->hcchar = (edpt->hcchar & ~HCCHAR_CHENA);

  // hctsiz: zero length packet still count as 1
  const uint16_t packet_count = cal_packet_count(edpt->buflen, hcchar_bm->ep_size);
  uint32_t hctsiz = (edpt->next_pid << HCTSIZ_PID_Pos) | (packet_count << HCTSIZ_PKTCNT_Pos) | edpt->buflen;
  // HS OUT after a NYET must begin with a PING to probe the device's buffer
  if (edpt->do_ping && edpt->speed == TUSB_SPEED_HIGH &&
      edpt->next_pid != HCTSIZ_PID_SETUP && hcchar_bm->ep_dir == TUSB_DIR_OUT) {
    hctsiz |= HCTSIZ_DOPING;
  }
  channel->hctsiz = hctsiz;
  edpt->do_ping = 0; // one-shot flag, consumed by this transfer

  // pre-calculate next PID based on packet count, adjusted in transfer complete interrupt if short packet
  if (hcchar_bm->ep_num == 0) {
    edpt->next_pid = HCTSIZ_PID_DATA1; // control data and status stage always start with DATA1
  } else {
    edpt->next_pid = cal_next_pid(edpt->next_pid, packet_count);
  }

  channel->hcsplt = edpt->hcsplt;
  channel->hcint = 0xFFFFFFFFU; // clear all channel interrupts

  if (dma_host_enabled(dwc2)) {
    // DMA mode: hardware moves the data; we only need the HALTED interrupt
    uint32_t hcintmsk = HCINT_HALTED;
    channel->hcintmsk = hcintmsk;
    dwc2->haintmsk |= TU_BIT(ch_id);

    channel->hcdma = (uint32_t) edpt->buffer;

    if (hcchar_bm->ep_dir == TUSB_DIR_IN) {
      channel_send_in_token(dwc2, channel);
    } else {
      channel->hcchar |= HCCHAR_CHENA;
    }
  } else {
    // slave mode: per-event interrupt mask depends on direction
    uint32_t hcintmsk = HCINT_NAK | HCINT_XACT_ERR | HCINT_STALL | HCINT_XFER_COMPLETE | HCINT_DATATOGGLE_ERR;
    if (hcchar_bm->ep_dir == TUSB_DIR_IN) {
      hcintmsk |= HCINT_BABBLE_ERR | HCINT_DATATOGGLE_ERR | HCINT_ACK;
    } else {
      hcintmsk |= HCINT_NYET;
      if (edpt->hcsplt_bm.split_en) {
        hcintmsk |= HCINT_ACK;
      }
    }
    channel->hcintmsk = hcintmsk;
    dwc2->haintmsk |= TU_BIT(ch_id);

    // enable channel for slave mode:
    // - OUT: it will enable corresponding FIFO channel
    // - IN : it will write an IN request to the Non-periodic Request Queue, this will have dwc2 trying to send
    // IN Token. If we got NAK, we have to re-enable the channel again in the interrupt. Due to the way usbh stack only
    // call hcd_edpt_xfer() once, we will need to manage de-allocate/re-allocate IN channel dynamically.
    if (hcchar_bm->ep_dir == TUSB_DIR_IN) {
      channel_send_in_token(dwc2, channel);
    } else {
      channel->hcchar |= HCCHAR_CHENA;
      if (edpt->buflen > 0) {
        // To prevent conflict with other channel, we will enable periodic/non-periodic FIFO empty interrupt accordingly
        // And write packet in the interrupt handler
        dwc2->gintmsk |= (is_period ? GINTSTS_PTX_FIFO_EMPTY : GINTSTS_NPTX_FIFO_EMPTY);
      }
    }
  }

  return true;
}
598
599// kick-off transfer with an endpoint
600static bool edpt_xfer_kickoff(dwc2_regs_t* dwc2, uint8_t ep_id) {
601 uint8_t ch_id = channel_alloc(dwc2);
602 TU_ASSERT(ch_id < 16); // all channel are in used
603 hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
604 xfer->ep_id = ep_id;
605 xfer->result = XFER_RESULT_INVALID;
606
607 return channel_xfer_start(dwc2, ch_id);
608}
609
// Submit a transfer, when complete hcd_event_xfer_complete() must be invoked
// Looks up the previously opened endpoint, stores the buffer, and kicks off
// the transfer on a freshly allocated channel.
bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t * buffer, uint16_t buflen) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  const uint8_t ep_num = tu_edpt_number(ep_addr);
  const uint8_t ep_dir = tu_edpt_dir(ep_addr);

  uint8_t ep_id = edpt_find_opened(dev_addr, ep_num, ep_dir);
  TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX); // endpoint must have been opened first
  hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];

  edpt->buffer = buffer;
  edpt->buflen = buflen;

  if (ep_num == 0) {
    // update ep_dir since control endpoint can switch direction
    edpt->hcchar_bm.ep_dir = ep_dir;
  }

  return edpt_xfer_kickoff(dwc2, ep_id);
}
630
631// Abort a queued transfer. Note: it can only abort transfer that has not been started
632// Return true if a queued transfer is aborted, false if there is no transfer to abort
633bool hcd_edpt_abort_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr) {
634 dwc2_regs_t* dwc2 = DWC2_REG(rhport);
635 const uint8_t ep_num = tu_edpt_number(ep_addr);
636 const uint8_t ep_dir = tu_edpt_dir(ep_addr);
637 const uint8_t ep_id = edpt_find_opened(dev_addr, ep_num, ep_dir);
638 TU_VERIFY(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
639
640 // hcd_int_disable(rhport);
641
642 // Find enabled channeled and disable it, channel will be de-allocated in the interrupt handler
643 const uint8_t ch_id = channel_find_enabled(dwc2, dev_addr, ep_num, ep_dir);
644 if (ch_id < 16) {
645 dwc2_channel_t* channel = &dwc2->channel[ch_id];
646 channel_disable(dwc2, channel);
647 }
648
649 // hcd_int_enable(rhport);
650
651 return true;
652}
653
// Submit a special transfer to send 8-byte Setup Packet, when complete hcd_event_xfer_complete() must be invoked
// Implemented as a regular OUT transfer on EP0 with the PID forced to SETUP.
bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, const uint8_t setup_packet[8]) {
  uint8_t ep_id = edpt_find_opened(dev_addr, 0, TUSB_DIR_OUT);
  TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX); // no opened endpoint
  hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
  edpt->next_pid = HCTSIZ_PID_SETUP; // setup stage always uses the SETUP PID

  return hcd_edpt_xfer(rhport, dev_addr, 0, (uint8_t*)(uintptr_t) setup_packet, 8);
}
663
664// clear stall, data toggle is also reset to DATA0
665bool hcd_edpt_clear_stall(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr) {
666 (void) rhport;
667 const uint8_t ep_num = tu_edpt_number(ep_addr);
668 const uint8_t ep_dir = tu_edpt_dir(ep_addr);
669 const uint8_t ep_id = edpt_find_opened(dev_addr, ep_num, ep_dir);
670 TU_VERIFY(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
671 hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
672
673 edpt->next_pid = HCTSIZ_PID_DATA0;
674
675 return true;
676}
677
678//--------------------------------------------------------------------
679// HCD Event Handler
680//--------------------------------------------------------------------
// Decide how to retry an IN transfer after NAK/NYET: immediate retry for
// control/bulk, immediate retry for periodic complete-splits (bounded by a
// NYET budget), otherwise tear the channel down and reschedule on SOF.
static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hcint) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];

  if (edpt_is_periodic(channel->hcchar_bm.ep_type)){
    // retry immediately for periodic split NYET if we haven't reach max retry
    if (channel->hcsplt_bm.split_en && channel->hcsplt_bm.split_compl && (hcint & HCINT_NYET || xfer->halted_nyet)) {
      xfer->period_split_nyet_count++;
      xfer->halted_nyet = 0;
      if (xfer->period_split_nyet_count < HCD_XFER_PERIOD_SPLIT_NYET_MAX) {
        channel->hcchar_bm.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
        channel_send_in_token(dwc2, channel);
        return;
      } else {
        // too many NYET, de-allocate channel with below code
        xfer->period_split_nyet_count = 0;
      }
    }

    // for periodic, de-allocate channel, enable SOF set frame counter for later transfer
    edpt->next_pid = channel->hctsiz_bm.pid; // save PID
    edpt->uframe_countdown = edpt->uframe_interval; // SOF handler counts this down then restarts
    dwc2->gintmsk |= GINTSTS_SOF;

    if (hcint & HCINT_HALTED) {
      // already halted, de-allocate channel (called from DMA isr)
      channel_dealloc(dwc2, ch_id);
    } else {
      // disable channel first if not halted (called slave isr)
      xfer->halted_sof_schedule = 1;
      channel_disable(dwc2, channel);
    }
  } else {
    // for control/bulk: retry immediately
    channel_send_in_token(dwc2, channel);
  }
}
719
720#if CFG_TUSB_DEBUG
721TU_ATTR_ALWAYS_INLINE static inline void print_hcint(uint32_t hcint) {
722 const char* str[] = {
723 "XFRC", "HALTED", "AHBERR", "STALL",
724 "NAK", "ACK", "NYET", "XERR",
725 "BBLERR", "FRMOR", "DTERR", "BNA",
726 "XCSERR", "DESC_LST"
727 };
728
729 for(uint32_t i=0; i<14; i++) {
730 if (hcint & TU_BIT(i)) {
731 TU_LOG1("%s ", str[i]);
732 }
733 }
734 TU_LOG1("\r\n");
735}
736#endif
737
738#if CFG_TUH_DWC2_SLAVE_ENABLE
739static void handle_rxflvl_irq(uint8_t rhport) {
740 dwc2_regs_t* dwc2 = DWC2_REG(rhport);
741
742 // Pop control word off FIFO
743 const dwc2_grxstsp_t grxstsp_bm = dwc2->grxstsp_bm;
744 const uint8_t ch_id = grxstsp_bm.ep_ch_num;
745
746 switch (grxstsp_bm.packet_status) {
748 // In packet received, pop this entry --> ACK interrupt
749 const uint16_t byte_count = grxstsp_bm.byte_count;
750 hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
751 TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX,);
752 hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
753
754 if (byte_count) {
755 dfifo_read_packet(dwc2, edpt->buffer + xfer->xferred_bytes, byte_count);
756 xfer->xferred_bytes += byte_count;
757 xfer->fifo_bytes = byte_count;
758 }
759 break;
760 }
761
763 // In transfer complete: After this entry is popped from the rx FIFO, dwc2 asserts a Transfer Completed
764 // interrupt --> handle_channel_irq()
765 break;
766
768 TU_ASSERT(0, ); // maybe try to change DToggle
769 break;
770
772 // triggered when channel.hcchar_bm.disable is set
773 // TODO handle later
774 break;
775
776 default: break; // ignore other status
777 }
778}
779
// return true if there is still pending data and need more ISR
// Walks all OUT channels and feeds as many whole packets into the TX FIFO as
// the FIFO space and request queue allow.
static bool handle_txfifo_empty(dwc2_regs_t* dwc2, bool is_periodic) {
  // Use period txsts for both p/np to get request queue space available (1-bit difference, it is small enough)
  volatile dwc2_hptxsts_t* txsts_bm = (volatile dwc2_hptxsts_t*) (is_periodic ? &dwc2->hptxsts : &dwc2->hnptxsts);

  const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
  for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
    dwc2_channel_t* channel = &dwc2->channel[ch_id];
    // skip writing to FIFO if channel is expecting halted.
    if (!(channel->hcintmsk & HCINT_HALTED) && (channel->hcchar_bm.ep_dir == TUSB_DIR_OUT)) {
      hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
      TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
      hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];

      const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
      for (uint16_t i = 0; i < remain_packets; i++) {
        const uint16_t remain_bytes = edpt->buflen - xfer->fifo_bytes;
        const uint16_t xact_bytes = tu_min16(remain_bytes, channel->hcchar_bm.ep_size); // one packet at a time

        // skip if there is not enough space in FIFO and RequestQueue.
        // Packet's last word written to FIFO will trigger a request queue
        if ((xact_bytes > (txsts_bm->fifo_available << 2)) || (txsts_bm->req_queue_available == 0)) {
          return true; // fifo_available is in 32-bit words, hence << 2
        }

        dfifo_write_packet(dwc2, ch_id, edpt->buffer + xfer->fifo_bytes, xact_bytes);
        xfer->fifo_bytes += xact_bytes;
      }
    }
  }

  return false; // no channel has pending data
}
813
// Handle per-channel interrupts for an IN endpoint in slave (non-DMA) mode.
// Returns true when the whole transfer is finished (xfer->result holds the
// outcome); otherwise schedules a retry or waits for the next event.
static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hcint) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
  bool is_done = false;

  // if (channel->hcsplt_bm.split_en) {
  // if (edpt->hcchar_bm.ep_num == 1) {
  // TU_LOG1("Frame %u, ch %u: ep %u, hcint 0x%04lX ", dwc2->hfnum_bm.num, ch_id, channel->hcchar_bm.ep_num, hcint);
  // print_hcint(hcint);
  // }

  if (hcint & HCINT_XFER_COMPLETE) {
    if (edpt->hcchar_bm.ep_num != 0) {
      edpt->next_pid = channel->hctsiz_bm.pid; // save pid (already toggled)
    }

    const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
    if (channel->hcsplt_bm.split_en && remain_packets && xfer->fifo_bytes == edpt->hcchar_bm.ep_size) {
      // Split can only complete 1 transaction (up to 1 packet) at a time, schedule more
      channel->hcsplt_bm.split_compl = 0;
    } else {
      xfer->result = XFER_RESULT_SUCCESS;
    }

    channel_disable(dwc2, channel); // halt; final bookkeeping happens in the HALTED branch
  } else if (hcint & (HCINT_XACT_ERR | HCINT_BABBLE_ERR | HCINT_STALL)) {
    if (hcint & HCINT_STALL) {
      xfer->result = XFER_RESULT_STALLED;
    } else if (hcint & HCINT_BABBLE_ERR) {
      xfer->result = XFER_RESULT_FAILED;
    } else if (hcint & HCINT_XACT_ERR) {
      // transaction error: count it and watch for ACK to clear the error streak
      xfer->err_count++;
      channel->hcintmsk |= HCINT_ACK;
    }

    channel_disable(dwc2, channel);
  } else if (hcint & HCINT_NYET) {
    // restart complete split
    channel->hcsplt_bm.split_compl = 1;
    xfer->halted_nyet = 1;
    channel_disable(dwc2, channel);
  } else if (hcint & HCINT_NAK) {
    // NAK received, re-enable channel if request queue is available
    if (channel->hcsplt_bm.split_en) {
      channel->hcsplt_bm.split_compl = 0; // restart with start-split
    }

    channel_disable(dwc2, channel);
  } else if (hcint & HCINT_ACK) {
    xfer->err_count = 0; // any ACK resets the transaction-error streak

    if (channel->hcsplt_bm.split_en) {
      if (!channel->hcsplt_bm.split_compl) {
        // start split is ACK --> do complete split
        channel->hcintmsk |= HCINT_NYET;
        channel->hcsplt_bm.split_compl = 1;
        channel_send_in_token(dwc2, channel);
      } else {
        // do nothing for complete split with DATA, this will trigger XferComplete and handled there
      }
    } else {
      // ACK with data
      const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
      if (remain_packets) {
        // still more packet to receive, also reset to start split
        channel->hcsplt_bm.split_compl = 0;
        channel_send_in_token(dwc2, channel);
      }
    }
  } else if (hcint & HCINT_HALTED) {
    // channel has actually stopped; decide the final disposition
    channel->hcintmsk &= ~HCINT_HALTED;
    if (xfer->halted_sof_schedule) {
      // de-allocate channel but does not complete xfer, we schedule it in the SOF interrupt
      channel_dealloc(dwc2, ch_id);
    } else if (xfer->result != XFER_RESULT_INVALID) {
      is_done = true; // a result was recorded before the halt
    } else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
      xfer->result = XFER_RESULT_FAILED; // too many consecutive transaction errors
      is_done = true;
    } else {
      // got here due to NAK or NYET
      channel_xfer_in_retry(dwc2, ch_id, hcint);
    }
  } else if (hcint & HCINT_DATATOGGLE_ERR) {
    xfer->err_count = 0;
    TU_ASSERT(false); // data toggle error is not recoverable here
  }
  return is_done;
}
904
// Handle a host-channel interrupt for an OUT endpoint in Slave (non-DMA) mode.
// Returns true when the transfer is finished (success, stall or error) and the
// caller should report completion and de-allocate the channel.
static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hcint) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
  bool is_done = false;

  if (hcint & HCINT_XFER_COMPLETE) {
    // All packets sent successfully
    is_done = true;
    xfer->result = XFER_RESULT_SUCCESS;
    channel->hcintmsk &= ~HCINT_ACK;
  } else if (hcint & HCINT_STALL) {
    xfer->result = XFER_RESULT_STALLED;
    channel_disable(dwc2, channel); // result is reported once HALTED fires
  } else if (hcint & HCINT_NYET) {
    xfer->err_count = 0;
    if (channel->hcsplt_bm.split_en) {
      // retry complete split
      channel->hcsplt_bm.split_compl = 1;
      channel->hcchar |= HCCHAR_CHENA;
    } else {
      // NYET on a non-split OUT: next attempt starts with PING protocol
      edpt->do_ping = 1;
      channel_xfer_out_wrapup(dwc2, ch_id);
      channel_disable(dwc2, channel);
    }
  } else if (hcint & (HCINT_NAK | HCINT_XACT_ERR)) {
    // clean up transfer so far, disable and start again later
    channel_xfer_out_wrapup(dwc2, ch_id);
    channel_disable(dwc2, channel);
    if (hcint & HCINT_XACT_ERR) {
      xfer->err_count++;
      channel->hcintmsk |= HCINT_ACK; // a subsequent ACK resets err_count
    } else {
      // NAK disable channel to flush all posted request and try again
      edpt->do_ping = 1;
      xfer->err_count = 0;
    }
  } else if (hcint & HCINT_HALTED) {
    // Channel halt completed: decide whether the transfer is finished
    channel->hcintmsk &= ~HCINT_HALTED;
    if (xfer->result != XFER_RESULT_INVALID) {
      is_done = true; // a result was already set before the halt
    } else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
      xfer->result = XFER_RESULT_FAILED;
      is_done = true;
    } else {
      // Got here due to NAK or NYET
      TU_ASSERT(channel_xfer_start(dwc2, ch_id));
    }
  } else if (hcint & HCINT_ACK) {
    xfer->err_count = 0;
    channel->hcintmsk &= ~HCINT_ACK;
    if (channel->hcsplt_bm.split_en && !channel->hcsplt_bm.split_compl) {
      // start split is ACK --> do complete split
      channel->hcsplt_bm.split_compl = 1;
      channel->hcchar |= HCCHAR_CHENA;
    }
  }

  if (is_done) {
    // Account bytes already pushed into the TX FIFO for this transfer
    xfer->xferred_bytes += xfer->fifo_bytes;
    xfer->fifo_bytes = 0;
  }

  return is_done;
}
969#endif
970
971#if CFG_TUH_DWC2_DMA_ENABLE
// Handle a host-channel interrupt for an IN endpoint in DMA mode.
// In this mode the handler only acts once the channel is halted (HCINT_HALTED);
// the companion bits in hcint indicate why the halt occurred.
// Returns true when the transfer is finished and should be reported to the stack.
static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hcint) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];

  bool is_done = false;

  // TU_LOG1("in hcint = %02lX\r\n", hcint);

  if (hcint & HCINT_HALTED) {
    if (hcint & (HCINT_XFER_COMPLETE | HCINT_STALL | HCINT_BABBLE_ERR)) {
      // Received bytes = programmed length minus what remains in HCTSIZ
      const uint16_t remain_bytes = (uint16_t) channel->hctsiz_bm.xfer_size;
      const uint16_t remain_packets = channel->hctsiz_bm.packet_count;
      const uint16_t actual_len = edpt->buflen - remain_bytes;
      xfer->xferred_bytes += actual_len;

      is_done = true;

      if (hcint & HCINT_STALL) {
        xfer->result = XFER_RESULT_STALLED;
      } else if (hcint & HCINT_BABBLE_ERR) {
        xfer->result = XFER_RESULT_FAILED;
      } else if (channel->hcsplt_bm.split_en && remain_packets && actual_len == edpt->hcchar_bm.ep_size) {
        // Split can only complete 1 transaction (up to 1 packet) at a time, schedule more
        is_done = false;
        edpt->buffer += actual_len;
        edpt->buflen -= actual_len;

        channel->hcsplt_bm.split_compl = 0; // next attempt begins with start-split
        channel_xfer_in_retry(dwc2, ch_id, hcint);
      } else {
        xfer->result = XFER_RESULT_SUCCESS;
      }

      xfer->err_count = 0;
      channel->hcintmsk &= ~HCINT_ACK;
    } else if (hcint & HCINT_XACT_ERR) {
      xfer->err_count++;
      if (xfer->err_count >= HCD_XFER_ERROR_MAX) {
        is_done = true;
        xfer->result = XFER_RESULT_FAILED;
      } else {
        // Retry; watch for ACK/NAK/toggle-error to reset the error counter
        channel->hcintmsk |= HCINT_ACK | HCINT_NAK | HCINT_DATATOGGLE_ERR;
        channel->hcsplt_bm.split_compl = 0;
        channel_xfer_in_retry(dwc2, ch_id, hcint);
      }
    } else if (hcint & HCINT_NYET) {
      // Must handle nyet before nak or ack. Could get a nyet at the same time as either of those on a BULK/CONTROL
      // OUT that started with a PING. The nyet takes precedence.
      if (channel->hcsplt_bm.split_en) {
        // split not yet mean hub has no data, retry complete split
        channel->hcsplt_bm.split_compl = 1;
        channel_xfer_in_retry(dwc2, ch_id, hcint);
      }
    } else if (hcint & HCINT_ACK) {
      xfer->err_count = 0;
      channel->hcintmsk &= ~HCINT_ACK;
      if (channel->hcsplt_bm.split_en) {
        // start split is ACK --> do complete split
        // TODO: for ISO must use xact_pos to plan complete split based on microframe (up to 187.5 bytes/uframe)
        channel->hcsplt_bm.split_compl = 1;
        if (edpt_is_periodic(channel->hcchar_bm.ep_type)) {
          channel->hcchar_bm.odd_frame = 1 - (dwc2->hfnum & 1); // transfer on next frame
        }
        channel_send_in_token(dwc2, channel);
      }
    } else if (hcint & (HCINT_NAK | HCINT_DATATOGGLE_ERR)) {
      xfer->err_count = 0;
      channel->hcintmsk &= ~(HCINT_NAK | HCINT_DATATOGGLE_ERR);
      channel->hcsplt_bm.split_compl = 0; // restart with start-split
      channel_xfer_in_retry(dwc2, ch_id, hcint);
    } else if (hcint & HCINT_FARME_OVERRUN) {
      // retry start-split in next binterval
      channel_xfer_in_retry(dwc2, ch_id, hcint);
    }
  }

  return is_done;
}
1051
// Handle a host-channel interrupt for an OUT endpoint in DMA mode.
// As with the IN-DMA path, work happens only once the channel is halted;
// the other hcint bits describe the halt reason.
// Returns true when the transfer is finished and should be reported to the stack.
static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hcint) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  dwc2_channel_t* channel = &dwc2->channel[ch_id];
  hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];

  bool is_done = false;

  // TU_LOG1("out hcint = %02lX\r\n", hcint);

  if (hcint & HCINT_HALTED) {
    if (hcint & (HCINT_XFER_COMPLETE | HCINT_STALL)) {
      is_done = true;
      xfer->err_count = 0;
      if (hcint & HCINT_XFER_COMPLETE) {
        xfer->result = XFER_RESULT_SUCCESS;
        // DMA pushed the whole buffer; count it all as transferred
        xfer->xferred_bytes += edpt->buflen;
      } else {
        xfer->result = XFER_RESULT_STALLED;
        channel_xfer_out_wrapup(dwc2, ch_id); // account the partial progress
      }
      channel->hcintmsk &= ~HCINT_ACK;
    } else if (hcint & HCINT_XACT_ERR) {
      if (hcint & (HCINT_NAK | HCINT_NYET | HCINT_ACK)) {
        // A handshake was seen alongside the error: not a hard failure
        xfer->err_count = 0;
        // clean up transfer so far and start again
        channel_xfer_out_wrapup(dwc2, ch_id);
        channel_xfer_start(dwc2, ch_id);
      } else {
        xfer->err_count++;
        if (xfer->err_count >= HCD_XFER_ERROR_MAX) {
          xfer->result = XFER_RESULT_FAILED;
          is_done = true;
        } else {
          // clean up transfer so far and start again
          channel_xfer_out_wrapup(dwc2, ch_id);
          channel_xfer_start(dwc2, ch_id);
        }
      }
    } else if (hcint & HCINT_NYET) {
      if (channel->hcsplt_bm.split_en && channel->hcsplt_bm.split_compl) {
        // split not yet mean hub has no data, retry complete split
        channel->hcsplt_bm.split_compl = 1;
        channel->hcchar |= HCCHAR_CHENA;
      }
    } else if (hcint & HCINT_ACK) {
      xfer->err_count = 0;
      if (channel->hcsplt_bm.split_en && !channel->hcsplt_bm.split_compl) {
        // start split is ACK --> do complete split
        channel->hcsplt_bm.split_compl = 1;
        channel->hcchar |= HCCHAR_CHENA;
      }
    }
  } else if (hcint & HCINT_ACK) {
    // ACK without halt: clear the error counter and stop watching for ACK
    xfer->err_count = 0;
    channel->hcintmsk &= ~HCINT_ACK;
  }

  return is_done;
}
1111#endif
1112
1113static void handle_channel_irq(uint8_t rhport, bool in_isr) {
1114 dwc2_regs_t* dwc2 = DWC2_REG(rhport);
1115 const bool is_dma = dma_host_enabled(dwc2);
1116 const uint8_t max_channel = DWC2_CHANNEL_COUNT(dwc2);
1117
1118 for (uint8_t ch_id = 0; ch_id < max_channel; ch_id++) {
1119 if (tu_bit_test(dwc2->haint, ch_id)) {
1120 dwc2_channel_t* channel = &dwc2->channel[ch_id];
1121 hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
1122 TU_ASSERT(xfer->ep_id < CFG_TUH_DWC2_ENDPOINT_MAX,);
1123 dwc2_channel_char_t hcchar_bm = channel->hcchar_bm;
1124
1125 uint32_t hcint = channel->hcint;
1126 channel->hcint = hcint;
1127
1128 bool is_done;
1129 if (is_dma) {
1130 #if CFG_TUH_DWC2_DMA_ENABLE
1131 if (hcchar_bm.ep_dir == TUSB_DIR_OUT) {
1132 is_done = handle_channel_out_dma(dwc2, ch_id, hcint);
1133 } else {
1134 is_done = handle_channel_in_dma(dwc2, ch_id, hcint);
1135 }
1136 #endif
1137 } else {
1138 #if CFG_TUH_DWC2_SLAVE_ENABLE
1139 if (hcchar_bm.ep_dir == TUSB_DIR_OUT) {
1140 is_done = handle_channel_out_slave(dwc2, ch_id, hcint);
1141 } else {
1142 is_done = handle_channel_in_slave(dwc2, ch_id, hcint);
1143 }
1144 #endif
1145 }
1146
1147 if (is_done) {
1148 const uint8_t ep_addr = tu_edpt_addr(hcchar_bm.ep_num, hcchar_bm.ep_dir);
1149 hcd_event_xfer_complete(hcchar_bm.dev_addr, ep_addr, xfer->xferred_bytes, xfer->result, in_isr);
1150 channel_dealloc(dwc2, ch_id);
1151 }
1152 }
1153 }
1154}
1155
1156// SOF is enabled for scheduled periodic transfer
1157static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
1158 (void) in_isr;
1159 dwc2_regs_t* dwc2 = DWC2_REG(rhport);
1160
1161 bool more_isr = false;
1162
1163 // If highspeed then SOF is 125us, else 1ms
1164 const uint32_t ucount = (hprt_speed_get(dwc2) == TUSB_SPEED_HIGH ? 1 : 8);
1165
1166 for(uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
1167 hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
1168 if (edpt->hcchar_bm.enable && edpt_is_periodic(edpt->hcchar_bm.ep_type) && edpt->uframe_countdown > 0) {
1169 edpt->uframe_countdown -= tu_min32(ucount, edpt->uframe_countdown);
1170 if (edpt->uframe_countdown == 0) {
1171 if (!edpt_xfer_kickoff(dwc2, ep_id)) {
1172 edpt->uframe_countdown = ucount; // failed to start, try again next frame
1173 }
1174 }
1175
1176 more_isr = true;
1177 }
1178 }
1179
1180 return more_isr;
1181}
1182
1183// Config HCFG FS/LS clock and HFIR for SOF interval according to link speed (value is in PHY clock unit)
1184static void port0_enable(dwc2_regs_t* dwc2, tusb_speed_t speed) {
1185 uint32_t hcfg = dwc2->hcfg & ~HCFG_FSLS_PHYCLK_SEL;
1186
1187 const dwc2_gusbcfg_t gusbcfg_bm = dwc2->gusbcfg_bm;
1188 uint32_t phy_clock;
1189
1190 if (gusbcfg_bm.phy_sel) {
1191 phy_clock = 48; // dedicated FS is 48Mhz
1192 if (speed == TUSB_SPEED_LOW) {
1193 hcfg |= HCFG_FSLS_PHYCLK_SEL_6MHZ;
1194 } else {
1195 hcfg |= HCFG_FSLS_PHYCLK_SEL_48MHZ;
1196 }
1197 } else {
1198 if (gusbcfg_bm.ulpi_utmi_sel) {
1199 phy_clock = 60; // ULPI 8-bit is 60Mhz
1200 } else {
1201 // UTMI+ 16-bit is 30Mhz, 8-bit is 60Mhz
1202 phy_clock = gusbcfg_bm.phy_if16 ? 30 : 60;
1203
1204 // Enable UTMI+ low power mode 48Mhz external clock if not highspeed
1205 if (speed == TUSB_SPEED_HIGH) {
1206 dwc2->gusbcfg &= ~GUSBCFG_PHYLPCS;
1207 } else {
1208 dwc2->gusbcfg |= GUSBCFG_PHYLPCS;
1209 // may need to reset port
1210 }
1211 }
1212 hcfg |= HCFG_FSLS_PHYCLK_SEL_30_60MHZ;
1213 }
1214
1215 dwc2->hcfg = hcfg;
1216
1217 uint32_t hfir = dwc2->hfir & ~HFIR_FRIVL_Msk;
1218 if (speed == TUSB_SPEED_HIGH) {
1219 hfir |= 125*phy_clock;
1220 } else {
1221 hfir |= 1000*phy_clock;
1222 }
1223
1224 dwc2->hfir = hfir;
1225}
1226
1227/* Handle Host Port interrupt, possible source are:
1228 - Connection Detection
1229 - Enable Change
1230 - Over Current Change
1231*/
1232static void handle_hprt_irq(uint8_t rhport, bool in_isr) {
1233 dwc2_regs_t* dwc2 = DWC2_REG(rhport);
1234 uint32_t hprt = dwc2->hprt & ~HPRT_W1_MASK;
1235 const dwc2_hprt_t hprt_bm = dwc2->hprt_bm;
1236
1237 if (dwc2->hprt & HPRT_CONN_DETECT) {
1238 // Port Connect Detect
1239 hprt |= HPRT_CONN_DETECT;
1240
1241 if (hprt_bm.conn_status) {
1243 } else {
1245 }
1246 }
1247
1248 if (dwc2->hprt & HPRT_ENABLE_CHANGE) {
1249 // Port enable change
1250 hprt |= HPRT_ENABLE_CHANGE;
1251
1252 if (hprt_bm.enable) {
1253 // Port enable
1254 const tusb_speed_t speed = hprt_speed_get(dwc2);
1255 port0_enable(dwc2, speed);
1256 } else {
1257 // TU_ASSERT(false, );
1258 }
1259 }
1260
1261 dwc2->hprt = hprt; // clear interrupt
1262}
1263
1264/* Interrupt Hierarchy
1265 HCINTn HPRT
1266 | |
1267 HAINT.CHn |
1268 | |
1269 GINTSTS : HCInt | PrtInt | NPTxFEmp | PTxFEmpp | RXFLVL | SOF
1270*/
// Top-level host-mode interrupt handler: reads GINTSTS (masked by GINTMSK)
// and dispatches to the port, channel, SOF and FIFO sub-handlers.
void hcd_int_handler(uint8_t rhport, bool in_isr) {
  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
  const uint32_t gintmsk = dwc2->gintmsk;
  const uint32_t gintsts = dwc2->gintsts & gintmsk; // only handle enabled sources

  // TU_LOG1_HEX(gintsts);

  if (gintsts & GINTSTS_CONIDSTSCHNG) {
    // Connector ID status change
    dwc2->gintsts = GINTSTS_CONIDSTSCHNG;

    //if (dwc2->gotgctl)
    // dwc2->hprt = HPRT_POWER; // power on port to turn on VBUS
    //dwc2->gintmsk |= GINTMSK_PRTIM;
    // TODO wait for SRP if OTG
  }

  if (gintsts & GINTSTS_SOF) {
    const bool more_sof = handle_sof_irq(rhport, in_isr);
    if (!more_sof) {
      // No periodic endpoint waiting: stop taking SOF interrupts
      dwc2->gintmsk &= ~GINTSTS_SOF;
    }
  }

  if (gintsts & GINTSTS_HPRTINT) {
    // Host port interrupt: source is cleared in HPRT register
    // TU_LOG1_HEX(dwc2->hprt);
    handle_hprt_irq(rhport, in_isr);
  }

  if (gintsts & GINTSTS_HCINT) {
    // Host Channel interrupt: source is cleared in HCINT register
    // must be handled after TX FIFO empty
    handle_channel_irq(rhport, in_isr);
  }

#if CFG_TUH_DWC2_SLAVE_ENABLE
  // RxFIFO non-empty interrupt handling
  if (gintsts & GINTSTS_RXFLVL) {
    // RXFLVL bit is read-only
    dwc2->gintmsk &= ~GINTSTS_RXFLVL; // disable RXFLVL interrupt while reading

    do {
      handle_rxflvl_irq(rhport); // read all packets
    } while(dwc2->gintsts & GINTSTS_RXFLVL);

    dwc2->gintmsk |= GINTSTS_RXFLVL;
  }

  if (gintsts & GINTSTS_NPTX_FIFO_EMPTY) {
    // NPTX FIFO empty interrupt, this is read-only and cleared by hardware when FIFO is written
    const bool more_nptxfe = handle_txfifo_empty(dwc2, false);
    if (!more_nptxfe) {
      // no more pending packet, disable interrupt
      dwc2->gintmsk &= ~GINTSTS_NPTX_FIFO_EMPTY;
    }
  }

  if (gintsts & GINTSTS_PTX_FIFO_EMPTY) {
    // PTX FIFO empty interrupt, this is read-only and cleared by hardware when FIFO is written
    const bool more_ptxfe = handle_txfifo_empty(dwc2, true);
    if (!more_ptxfe) {
      // no more pending packet, disable interrupt
      dwc2->gintmsk &= ~GINTSTS_PTX_FIFO_EMPTY;
    }
  }
#endif
}
1339
1340#endif
static bool in_isr
xfer_td_t xfer[EP_CBI_COUNT+1][2]
Definition: dcd_nrf5x.c:119
uint8_t dev_addr
Definition: dcd_pic32mz.c:81
void dfifo_read_packet(dwc2_regs_t *dwc2, uint8_t *dst, uint16_t len)
Definition: dwc2_common.c:252
bool dwc2_core_is_highspeed(dwc2_regs_t *dwc2, tusb_role_t role)
Definition: dwc2_common.c:172
bool dwc2_core_init(uint8_t rhport, bool is_highspeed)
Definition: dwc2_common.c:197
void dfifo_write_packet(dwc2_regs_t *dwc2, uint8_t fifo_num, const uint8_t *src, uint16_t len)
Definition: dwc2_common.c:277
static TU_ATTR_ALWAYS_INLINE dwc2_regs_t * DWC2_REG(uint8_t rhport)
Definition: dwc2_common.h:67
static TU_ATTR_ALWAYS_INLINE void dwc2_int_set(uint8_t rhport, tusb_role_t role, bool enabled)
Definition: dwc2_esp32.h:83
static const dwc2_controller_t _dwc2_controller[]
Definition: dwc2_esp32.h:57
@ HCTSIZ_PID_DATA1
Definition: dwc2_type.h:144
@ HCTSIZ_PID_DATA0
Definition: dwc2_type.h:142
@ HCTSIZ_PID_SETUP
Definition: dwc2_type.h:145
@ HPRT_SPEED_FULL
Definition: dwc2_type.h:132
@ HPRT_SPEED_HIGH
Definition: dwc2_type.h:131
@ HPRT_SPEED_LOW
Definition: dwc2_type.h:133
@ HCCHAR_EPTYPE_INTERRUPT
Definition: dwc2_type.h:171
@ HCCHAR_EPTYPE_ISOCHRONOUS
Definition: dwc2_type.h:169
@ GINTSTS_CMODE_HOST
Definition: dwc2_type.h:138
@ GRXSTS_PKTSTS_HOST_CHANNEL_HALTED
Definition: dwc2_type.h:163
@ GRXSTS_PKTSTS_RX_COMPLETE
Definition: dwc2_type.h:161
@ GRXSTS_PKTSTS_RX_DATA
Definition: dwc2_type.h:160
@ GRXSTS_PKTSTS_HOST_DATATOGGLE_ERR
Definition: dwc2_type.h:162
@ GHWCFG2_ARCH_INTERNAL_DMA
Definition: dwc2_type.h:106
static TU_ATTR_ALWAYS_INLINE void hcd_event_device_remove(uint8_t rhport, bool in_isr)
Definition: hcd.h:209
static TU_ATTR_ALWAYS_INLINE void hcd_event_xfer_complete(uint8_t dev_addr, uint8_t ep_addr, uint32_t xferred_bytes, xfer_result_t result, bool in_isr)
Definition: hcd.h:221
static TU_ATTR_ALWAYS_INLINE void hcd_event_device_attach(uint8_t rhport, bool in_isr)
Definition: hcd.h:197
void hcd_devtree_get_info(uint8_t dev_addr, hcd_devtree_info_t *devtree_info)
Definition: usbh.c:949
static bool channel_xfer_start(dwc2_regs_t *dwc2, uint8_t ch_id)
Definition: hcd_dwc2.c:518
static TU_ATTR_ALWAYS_INLINE uint8_t edpt_alloc(void)
Definition: hcd_dwc2.c:199
static void handle_channel_irq(uint8_t rhport, bool in_isr)
Definition: hcd_dwc2.c:1113
void hcd_int_disable(uint8_t rhport)
Definition: hcd_dwc2.c:385
static TU_ATTR_ALWAYS_INLINE bool dma_host_enabled(const dwc2_regs_t *dwc2)
Definition: hcd_dwc2.c:129
bool hcd_setup_send(uint8_t rhport, uint8_t dev_addr, const uint8_t setup_packet[8])
Definition: hcd_dwc2.c:655
bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t *desc_ep)
Definition: hcd_dwc2.c:445
bool hcd_configure(uint8_t rhport, uint32_t cfg_id, const void *cfg_param)
Definition: hcd_dwc2.c:322
static TU_ATTR_ALWAYS_INLINE uint8_t channel_find_enabled(dwc2_regs_t *dwc2, uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir)
Definition: hcd_dwc2.c:184
static TU_ATTR_ALWAYS_INLINE uint8_t channel_alloc(dwc2_regs_t *dwc2)
Definition: hcd_dwc2.c:136
static TU_ATTR_ALWAYS_INLINE tusb_speed_t hprt_speed_get(dwc2_regs_t *dwc2)
Definition: hcd_dwc2.c:115
static TU_ATTR_ALWAYS_INLINE bool channel_disable(const dwc2_regs_t *dwc2, dwc2_channel_t *channel)
Definition: hcd_dwc2.c:168
static void handle_rxflvl_irq(uint8_t rhport)
Definition: hcd_dwc2.c:739
@ HCD_XFER_PERIOD_SPLIT_NYET_MAX
Definition: hcd_dwc2.c:56
static TU_ATTR_ALWAYS_INLINE bool channel_send_in_token(const dwc2_regs_t *dwc2, dwc2_channel_t *channel)
Definition: hcd_dwc2.c:177
static bool handle_channel_in_dma(dwc2_regs_t *dwc2, uint8_t ch_id, uint32_t hcint)
Definition: hcd_dwc2.c:972
static TU_ATTR_ALWAYS_INLINE void print_hcint(uint32_t hcint)
Definition: hcd_dwc2.c:721
static void channel_xfer_out_wrapup(dwc2_regs_t *dwc2, uint8_t ch_id)
Definition: hcd_dwc2.c:495
static TU_ATTR_ALWAYS_INLINE uint8_t req_queue_avail(const dwc2_regs_t *dwc2, bool is_period)
Definition: hcd_dwc2.c:154
bool hcd_edpt_clear_stall(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr)
Definition: hcd_dwc2.c:665
void hcd_int_enable(uint8_t rhport)
Definition: hcd_dwc2.c:380
@ HCD_XFER_ERROR_MAX
Definition: hcd_dwc2.c:52
TU_VERIFY_STATIC(CFG_TUH_DWC2_ENDPOINT_MAX<=255, "currently only use 8-bit for index")
bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t *buffer, uint16_t buflen)
Definition: hcd_dwc2.c:611
static TU_ATTR_ALWAYS_INLINE bool edpt_is_periodic(uint8_t ep_type)
Definition: hcd_dwc2.c:150
void hcd_device_close(uint8_t rhport, uint8_t dev_addr)
Definition: hcd_dwc2.c:430
bool hcd_edpt_abort_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr)
Definition: hcd_dwc2.c:633
static void port0_enable(dwc2_regs_t *dwc2, tusb_speed_t speed)
Definition: hcd_dwc2.c:1184
void hcd_port_reset_end(uint8_t rhport)
Definition: hcd_dwc2.c:415
static void handle_hprt_irq(uint8_t rhport, bool in_isr)
Definition: hcd_dwc2.c:1232
static bool handle_sof_irq(uint8_t rhport, bool in_isr)
Definition: hcd_dwc2.c:1157
void hcd_port_reset(uint8_t rhport)
Definition: hcd_dwc2.c:407
bool hcd_port_connect_status(uint8_t rhport)
Definition: hcd_dwc2.c:400
static TU_ATTR_ALWAYS_INLINE uint16_t cal_packet_count(uint16_t len, uint16_t ep_size)
Definition: hcd_dwc2.c:224
static bool handle_channel_out_dma(dwc2_regs_t *dwc2, uint8_t ch_id, uint32_t hcint)
Definition: hcd_dwc2.c:1052
uint32_t hcd_frame_number(uint8_t rhport)
Definition: hcd_dwc2.c:390
static void dfifo_host_init(uint8_t rhport)
Definition: hcd_dwc2.c:283
static TU_ATTR_ALWAYS_INLINE uint8_t edpt_find_opened(uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir)
Definition: hcd_dwc2.c:213
static bool edpt_xfer_kickoff(dwc2_regs_t *dwc2, uint8_t ep_id)
Definition: hcd_dwc2.c:600
hcd_data_t _hcd_data
Definition: hcd_dwc2.c:110
tusb_speed_t hcd_port_speed_get(uint8_t rhport)
Definition: hcd_dwc2.c:423
static TU_ATTR_ALWAYS_INLINE void channel_dealloc(dwc2_regs_t *dwc2, uint8_t ch_id)
Definition: hcd_dwc2.c:162
void hcd_int_handler(uint8_t rhport, bool in_isr)
Definition: hcd_dwc2.c:1271
static bool handle_channel_out_slave(dwc2_regs_t *dwc2, uint8_t ch_id, uint32_t hcint)
Definition: hcd_dwc2.c:905
static void channel_xfer_in_retry(dwc2_regs_t *dwc2, uint8_t ch_id, uint32_t hcint)
Definition: hcd_dwc2.c:681
bool hcd_init(uint8_t rhport, const tusb_rhport_init_t *rh_init)
Definition: hcd_dwc2.c:331
static bool handle_txfifo_empty(dwc2_regs_t *dwc2, bool is_periodic)
Definition: hcd_dwc2.c:781
@ HPRT_W1_MASK
Definition: hcd_dwc2.c:48
static bool handle_channel_in_slave(dwc2_regs_t *dwc2, uint8_t ch_id, uint32_t hcint)
Definition: hcd_dwc2.c:814
static TU_ATTR_ALWAYS_INLINE uint8_t cal_next_pid(uint8_t pid, uint8_t packet_count)
Definition: hcd_dwc2.c:232
uint8_t const * buffer
Definition: midi_device.h:100
AUDIO Channel Cluster Descriptor (4.1)
Definition: audio.h:647
uint32_t pid
Definition: dwc2_type.h:436
uint32_t num_host_ch
Definition: dwc2_type.h:310
uint32_t ep_type
Definition: dwc2_type.h:414
uint32_t split_en
Definition: dwc2_type.h:429
uint8_t ep_num
Definition: hcd_max3421.c:187
uint32_t packet_count
Definition: dwc2_type.h:435
uint32_t hub_addr
Definition: dwc2_type.h:425
uint8_t bInterval
Definition: tusb_types.h:372
uint8_t dev_addr
Definition: hcd_khci.c:105
uint8_t bmAttributes
See: audio_clock_source_attribute_t.
Definition: audio.h:672
uint32_t byte_count
Definition: dwc2_type.h:294
uint32_t err_multi_count
Definition: dwc2_type.h:415
uint32_t req_queue_available
Definition: dwc2_type.h:370
uint32_t packet_status
Definition: dwc2_type.h:296
uint32_t phy_if16
Definition: dwc2_type.h:244
uint32_t xfer_size
Definition: dwc2_type.h:434
uint32_t arch
Definition: dwc2_type.h:305
uint32_t fifo_available
Definition: dwc2_type.h:369
uint32_t conn_status
Definition: dwc2_type.h:390
uint32_t ep_ch_num
Definition: dwc2_type.h:293
uint32_t speed
Definition: dwc2_type.h:403
uint32_t phy_sel
Definition: dwc2_type.h:247
uint32_t low_speed_dev
Definition: dwc2_type.h:413
uint32_t hub_port
Definition: dwc2_type.h:424
uint8_t bEndpointAddress
Definition: video.h:306
uint32_t odd_frame
Definition: dwc2_type.h:417
uint32_t enable
Definition: dwc2_type.h:392
uint32_t xact_pos
Definition: dwc2_type.h:426
uint32_t ep_dir
Definition: dwc2_type.h:411
uint32_t ulpi_utmi_sel
Definition: dwc2_type.h:245
uint32_t ep_size
Definition: dwc2_type.h:409
uint32_t split_compl
Definition: dwc2_type.h:427
volatile dwc2_channel_tsize_t hctsiz_bm
Definition: dwc2_type.h:461
volatile uint32_t hcint
Definition: dwc2_type.h:457
volatile uint32_t hcsplt
Definition: dwc2_type.h:454
volatile dwc2_channel_char_t hcchar_bm
Definition: dwc2_type.h:451
volatile uint32_t hctsiz
Definition: dwc2_type.h:460
volatile uint32_t hcintmsk
Definition: dwc2_type.h:458
volatile dwc2_channel_split_t hcsplt_bm
Definition: dwc2_type.h:455
volatile uint32_t hcdma
Definition: dwc2_type.h:463
volatile uint32_t hcchar
Definition: dwc2_type.h:450
volatile uint32_t gahbcfg
Definition: dwc2_type.h:549
volatile uint32_t hcfg
Definition: dwc2_type.h:608
volatile uint32_t gnptxfsiz
Definition: dwc2_type.h:570
volatile dwc2_hptxsts_t hptxsts_bm
Definition: dwc2_type.h:617
volatile uint32_t hfir
Definition: dwc2_type.h:609
volatile uint32_t grxfsiz
Definition: dwc2_type.h:567
volatile uint32_t gusbcfg
Definition: dwc2_type.h:553
volatile uint32_t hptxfsiz
Definition: dwc2_type.h:603
volatile dwc2_hprt_t hprt_bm
Definition: dwc2_type.h:625
volatile uint32_t gsnpsid
Definition: dwc2_type.h:584
volatile uint32_t hfnum
Definition: dwc2_type.h:611
volatile uint32_t haintmsk
Definition: dwc2_type.h:620
volatile dwc2_grxstsp_t grxstsp_bm
Definition: dwc2_type.h:565
volatile uint32_t hprt
Definition: dwc2_type.h:624
volatile dwc2_ghwcfg2_t ghwcfg2_bm
Definition: dwc2_type.h:588
volatile uint32_t hptxsts
Definition: dwc2_type.h:616
dwc2_channel_t channel[16]
Definition: dwc2_type.h:630
volatile uint32_t hnptxsts
Definition: dwc2_type.h:573
volatile dwc2_hnptxsts_t hnptxsts_bm
Definition: dwc2_type.h:574
volatile dwc2_gusbcfg_t gusbcfg_bm
Definition: dwc2_type.h:554
volatile uint32_t gintmsk
Definition: dwc2_type.h:561
volatile uint32_t gintsts
Definition: dwc2_type.h:560
volatile uint32_t gdfifocfg
Definition: dwc2_type.h:600
volatile uint32_t haint
Definition: dwc2_type.h:619
hcd_xfer_t xfer[DWC2_CHANNEL_COUNT_MAX]
Definition: hcd_dwc2.c:106
hcd_endpoint_t edpt[CFG_TUH_DWC2_ENDPOINT_MAX]
Definition: hcd_dwc2.c:107
uint8_t hub_port
Definition: hcd.h:96
uint8_t hub_addr
Definition: hcd.h:95
uint8_t speed
Definition: hcd.h:97
uint32_t hcsplt
Definition: hcd_dwc2.c:70
uint32_t hcchar
Definition: hcd_dwc2.c:66
dwc2_channel_split_t hcsplt_bm
Definition: hcd_dwc2.c:71
uint32_t uframe_countdown
Definition: hcd_dwc2.c:82
dwc2_channel_char_t hcchar_bm
Definition: hcd_dwc2.c:67
uint16_t buflen
Definition: hcd_dwc2.c:85
uint8_t * buffer
Definition: hcd_dwc2.c:84
uint8_t period_split_nyet_count
Definition: hcd_dwc2.c:94
uint16_t fifo_bytes
Definition: hcd_dwc2.c:102
volatile bool allocated
Definition: hcd_dwc2.c:90
uint16_t xferred_bytes
Definition: hcd_dwc2.c:100
uint8_t result
Definition: hcd_dwc2.c:98
uint8_t ep_id
Definition: hcd_dwc2.c:91
static TU_ATTR_ALWAYS_INLINE uint32_t tu_div_ceil(uint32_t v, uint32_t d)
Definition: tusb_common.h:179
static TU_ATTR_ALWAYS_INLINE uint16_t tu_min16(uint16_t x, uint16_t y)
Definition: tusb_common.h:155
static TU_ATTR_ALWAYS_INLINE uint32_t tu_min32(uint32_t x, uint32_t y)
Definition: tusb_common.h:156
static TU_ATTR_ALWAYS_INLINE bool tu_bit_test(uint32_t value, uint8_t pos)
Definition: tusb_common.h:151
static TU_ATTR_ALWAYS_INLINE uint32_t tu_u32_from_u16(uint16_t high, uint16_t low)
Definition: tusb_common.h:129
@ TUSB_DIR_IN
Definition: tusb_types.h:67
@ TUSB_DIR_OUT
Definition: tusb_types.h:66
@ TUSB_EPSIZE_BULK_HS
Definition: tusb_types.h:74
@ TUSB_EPSIZE_BULK_FS
Definition: tusb_types.h:73
@ TUSB_EPSIZE_ISO_HS_MAX
Definition: tusb_types.h:77
tusb_speed_t
defined base on EHCI specs value for Endpoint Speed
Definition: tusb_types.h:49
@ TUSB_SPEED_INVALID
Definition: tusb_types.h:54
@ TUSB_SPEED_FULL
Definition: tusb_types.h:50
@ TUSB_SPEED_LOW
Definition: tusb_types.h:51
@ TUSB_SPEED_HIGH
Definition: tusb_types.h:52
static TU_ATTR_ALWAYS_INLINE uint8_t tu_edpt_number(uint8_t addr)
Definition: tusb_types.h:507
@ XFER_RESULT_FAILED
Definition: tusb_types.h:238
@ XFER_RESULT_SUCCESS
Definition: tusb_types.h:237
@ XFER_RESULT_INVALID
Definition: tusb_types.h:241
@ XFER_RESULT_STALLED
Definition: tusb_types.h:239
static TU_ATTR_ALWAYS_INLINE uint16_t tu_edpt_packet_size(tusb_desc_endpoint_t const *desc_ep)
Definition: tusb_types.h:515
@ TUSB_XFER_ISOCHRONOUS
Definition: tusb_types.h:60
@ TUSB_XFER_INTERRUPT
Definition: tusb_types.h:62
TU_ATTR_PACKED_END TU_ATTR_BIT_FIELD_ORDER_END static TU_ATTR_ALWAYS_INLINE tusb_dir_t tu_edpt_dir(uint8_t addr)
Definition: tusb_types.h:502
static TU_ATTR_ALWAYS_INLINE uint8_t tu_edpt_addr(uint8_t num, uint8_t dir)
Definition: tusb_types.h:511
@ TUSB_INDEX_INVALID_8
Definition: tusb_types.h:274
volatile uint16_t actual_len
Definition: usbh.c:266