140 changes: 103 additions & 37 deletions src/portable/synopsys/dwc2/hcd_dwc2.c
@@ -32,19 +32,19 @@
#error DWC2 requires either CFG_TUH_DWC2_SLAVE_ENABLE or CFG_TUH_DWC2_DMA_ENABLE to be enabled
#endif
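
The guard above means a host build must select at least one transfer mode. A minimal tusb_config.h sketch (the two macro names are from the guard itself; the chosen values are an assumption, not part of this diff):

// tusb_config.h (sketch): enable at least one DWC2 host transfer mode
#define CFG_TUH_DWC2_DMA_ENABLE    1   // buffer/external DMA mode
// #define CFG_TUH_DWC2_SLAVE_ENABLE 1 // or slave (FIFO/IRQ) mode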

#include "host/hcd.h"
#include "host/usbh.h"
#include "dwc2_common.h"

// Debug level for DWC2
#define DWC2_DEBUG 2

// Max number of endpoints the application can open; can be larger than DWC2_CHANNEL_COUNT_MAX
#ifndef CFG_TUH_DWC2_ENDPOINT_MAX
#define CFG_TUH_DWC2_ENDPOINT_MAX 16u
#endif

#define DWC2_CHANNEL_COUNT_MAX 16u // absolute max channel count
TU_VERIFY_STATIC(CFG_TUH_DWC2_ENDPOINT_MAX <= 255, "currently only use 8-bit for index");

enum {
@@ -79,7 +79,8 @@ typedef struct {
uint32_t speed : 2;
uint32_t next_pid : 2; // PID for next transfer
uint32_t next_do_ping : 1; // Do PING for next transfer if possible (highspeed OUT)
uint32_t closing : 1; // endpoint is closing
// uint32_t : 8;
};

uint32_t uframe_countdown; // micro-frame count down to transfer for periodic, only need 18-bit
@@ -96,6 +97,7 @@ typedef struct {
uint8_t err_count : 3;
uint8_t period_split_nyet_count : 3;
uint8_t halted_nyet : 1;
uint8_t closing : 1; // closing channel
};
uint8_t result;

@@ -195,16 +197,31 @@ TU_ATTR_ALWAYS_INLINE static inline void channel_dealloc(dwc2_regs_t* dwc2, uint
}

TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
const bool is_period = channel_is_periodic(channel->hcchar);
if (dma_host_enabled(dwc2)) {
// In buffer DMA or external DMA mode:
// - Channel disable must not be programmed for non-split periodic channels. At the end of the next uframe/frame (in
// the worst case), the controller generates a channel halted and disables the channel automatically.
// - For split enabled channels (both non-periodic and periodic), channel disable must not be programmed randomly.
// However, channel disable can be programmed for specific scenarios such as NAK and FrmOvrn.
if (is_period && (channel->hcsplt & HCSPLT_SPLITEN)) {
return true;
}
} else {
while (0 == req_queue_avail(dwc2, is_period)) {
// blocking wait until the request queue has space
}
}
channel->hcintmsk |= HCINT_HALTED;
channel->hcchar |= HCCHAR_CHDIS | HCCHAR_CHENA; // must set both CHDIS and CHENA
return true;
}
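
The disable is asynchronous: setting CHDIS together with CHENA only requests a halt, and the core signals completion by raising HCINT_HALTED (unmasked above). A minimal sketch of that contract, assuming the DWC2 write-1-to-clear HCINT semantics; the driver itself reacts to HALTED in its interrupt handler rather than polling like this:

// Illustration only: wait until the core reports the channel halted.
static void channel_wait_halted(dwc2_channel_t* channel) {
  while ((channel->hcint & HCINT_HALTED) == 0) {
    // core halts the channel, at worst at the end of the next uframe/frame
  }
  channel->hcint = HCINT_HALTED; // acknowledge: HCINT is write-1-to-clear
}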

// attempt to send IN token to receive data
TU_ATTR_ALWAYS_INLINE static inline bool channel_send_in_token(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
while (0 == req_queue_avail(dwc2, channel_is_periodic(channel->hcchar))) {
// blocking wait until the request queue has space
}
channel->hcchar |= HCCHAR_CHENA;
return true;
}
@@ -237,13 +254,37 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t edpt_alloc(void) {
return TUSB_INDEX_INVALID_8;
}

// De-allocate an endpoint: mark its slot as unused
TU_ATTR_ALWAYS_INLINE static inline void edpt_dealloc(hcd_endpoint_t *edpt) {
edpt->hcchar_bm.enable = 0;
}

// close an opened endpoint
static void edpt_close(dwc2_regs_t *dwc2, uint8_t ep_id) {
hcd_endpoint_t *edpt = &_hcd_data.edpt[ep_id];
edpt->closing = 1; // mark endpoint as closing

// disable the active channel belonging to this endpoint
for (uint8_t ch_id = 0; ch_id < DWC2_CHANNEL_COUNT_MAX; ch_id++) {
hcd_xfer_t *xfer = &_hcd_data.xfer[ch_id];
if (xfer->allocated && xfer->ep_id == ep_id) {
dwc2_channel_t *channel = &dwc2->channel[ch_id];
xfer->closing = 1;
channel_disable(dwc2, channel);
return; // only 1 active channel per endpoint
}
}

edpt_dealloc(edpt); // no active channel, safe to de-alloc now
}
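
When a channel is still active, the close therefore completes in two phases. A condensed sketch of the assumed hand-off, mirroring the closing checks added to handle_channel_irq() further down (the helper name is hypothetical):

// Phase 1: edpt_close() marks edpt->closing / xfer->closing and requests channel_disable().
// Phase 2: the channel-halted interrupt finishes the close instead of reporting a transfer event.
static void on_channel_halted_while_closing(dwc2_regs_t* dwc2, uint8_t ch_id) {
  hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
  if (xfer->closing) {
    edpt_dealloc(&_hcd_data.edpt[xfer->ep_id]); // complete the deferred de-allocation
  }
  channel_dealloc(dwc2, ch_id); // channel becomes reusable either way
}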

// Find an endpoint that is opened previously with hcd_edpt_open()
// Note: EP0 is bidirectional
TU_ATTR_ALWAYS_INLINE static inline uint8_t edpt_find_opened(uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir) {
for (uint8_t i = 0; i < (uint8_t)CFG_TUH_DWC2_ENDPOINT_MAX; i++) {
const hcd_endpoint_t *edpt = &_hcd_data.edpt[i];
const dwc2_channel_char_t hcchar_bm = edpt->hcchar_bm;
if (hcchar_bm.enable && hcchar_bm.dev_addr == dev_addr && hcchar_bm.ep_num == ep_num &&
(ep_num == 0 || hcchar_bm.ep_dir == ep_dir)) {
return i;
}
}
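
Because EP0 is bidirectional, both directions resolve to the same slot. For example (device address illustrative, TUSB_DIR_* from TinyUSB's common types):

// Both lookups return the same index for a control endpoint:
const uint8_t id_out = edpt_find_opened(daddr, 0, TUSB_DIR_OUT);
const uint8_t id_in  = edpt_find_opened(daddr, 0, TUSB_DIR_IN);
// For ep_num != 0 the direction must match as well.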
@@ -456,11 +497,11 @@ tusb_speed_t hcd_port_speed_get(uint8_t rhport) {

// HCD closes all opened endpoints belonging to this device
void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
for (uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
const hcd_endpoint_t *edpt = &_hcd_data.edpt[ep_id];
if (edpt->hcchar_bm.enable && edpt->hcchar_bm.dev_addr == dev_addr) {
edpt_close(dwc2, ep_id);
}
}
}
@@ -520,8 +561,15 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t*
}

bool hcd_edpt_close(uint8_t rhport, uint8_t daddr, uint8_t ep_addr) {
dwc2_regs_t *dwc2 = DWC2_REG(rhport);
const uint8_t ep_num = tu_edpt_number(ep_addr);
const uint8_t ep_dir = tu_edpt_dir(ep_addr);
const uint8_t ep_id = edpt_find_opened(daddr, ep_num, ep_dir);
TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);

edpt_close(dwc2, ep_id);

return true;
}
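
A hypothetical caller-side sketch (rhport, address and endpoint values are illustrative). Note that a true return only means the close has been initiated; if a transfer was in flight, the actual de-allocation happens later in the channel-halted interrupt:

const uint8_t daddr = 1;
if (hcd_edpt_close(0 /* rhport */, daddr, 0x81 /* bulk IN */)) {
  // endpoint is now closing; do not submit new transfers to it
}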

// clean up channel after part of the transfer is done but the whole URB is not yet complete
@@ -590,8 +638,7 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
channel->hcint = 0xFFFFFFFFU; // clear all channel interrupts

if (dma_host_enabled(dwc2)) {
channel->hcintmsk = HCINT_HALTED;
dwc2->haintmsk |= TU_BIT(ch_id);

channel->hcdma = (uint32_t) edpt->buffer;
@@ -646,15 +693,15 @@ static bool edpt_xfer_kickoff(dwc2_regs_t* dwc2, uint8_t ep_id) {
return channel_xfer_start(dwc2, ch_id);
}

// Submit a transfer; when complete, hcd_event_xfer_complete() must be invoked
bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t * buffer, uint16_t buflen) {
dwc2_regs_t* dwc2 = DWC2_REG(rhport);
const uint8_t ep_num = tu_edpt_number(ep_addr);
const uint8_t ep_dir = tu_edpt_dir(ep_addr);

uint8_t ep_id = edpt_find_opened(dev_addr, ep_num, ep_dir);
TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
hcd_endpoint_t *edpt = &_hcd_data.edpt[ep_id];
TU_VERIFY(edpt->closing == 0); // skip if endpoint is closing

edpt->buffer = buffer;
edpt->buflen = buflen;
@@ -945,6 +992,8 @@ static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t h
is_done = true;
} else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
xfer->result = XFER_RESULT_FAILED;
is_done = true;
} else if (xfer->closing) {
is_done = true;
} else {
// got here due to NAK or NYET
@@ -1006,6 +1055,8 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
} else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
xfer->result = XFER_RESULT_FAILED;
is_done = true;
} else if (xfer->closing) {
is_done = true;
} else {
// Got here due to NAK or NYET
TU_ASSERT(channel_xfer_start(dwc2, ch_id));
@@ -1122,6 +1173,10 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
// retry start-split in next binterval
channel_xfer_in_retry(dwc2, ch_id, hcint);
}

if (xfer->closing) {
is_done = true;
}
}

return is_done;
@@ -1182,6 +1237,10 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
channel->hcchar |= HCCHAR_CHENA;
}
}

if (xfer->closing) {
is_done = true;
}
} else if (hcint & HCINT_ACK) {
xfer->err_count = 0;
channel->hcintmsk &= ~HCINT_ACK;
@@ -1226,12 +1285,17 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
} else {
is_done = handle_channel_in_slave(dwc2, ch_id, hcint);
}
#endif
}

if (is_done) {
if (xfer->closing) {
hcd_endpoint_t *edpt = &_hcd_data.edpt[xfer->ep_id];
edpt_dealloc(edpt);
} else {
const uint8_t ep_addr = tu_edpt_addr(hcchar.ep_num, hcchar.ep_dir);
hcd_event_xfer_complete(hcchar.dev_addr, ep_addr, xfer->xferred_bytes, (xfer_result_t)xfer->result, in_isr);
}
channel_dealloc(dwc2, ch_id);
}
}
@@ -1250,16 +1314,18 @@ static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
const uint32_t ucount = (hprt_speed_get(dwc2) == TUSB_SPEED_HIGH ? 1 : 8);

for(uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
hcd_endpoint_t *edpt = &_hcd_data.edpt[ep_id];
if (edpt->closing == 0) {
if (edpt->hcchar_bm.enable && channel_is_periodic(edpt->hcchar) && edpt->uframe_countdown > 0) {
edpt->uframe_countdown -= tu_min32(ucount, edpt->uframe_countdown);
if (edpt->uframe_countdown == 0) {
if (!edpt_xfer_kickoff(dwc2, ep_id)) {
edpt->uframe_countdown = ucount; // failed to start, try again next frame
}
}

more_isr = true;
}
}
}
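
The countdown above works in micro-frame (125 us) units: each high-speed SOF advances 1 tick, each full/low-speed SOF advances 8. A self-contained sketch of the same step, with tu_min32 written out explicitly:

#include <stdint.h>

// One SOF tick of a periodic endpoint's countdown; 0 means the interval
// elapsed and the transfer should be kicked off this (micro-)frame.
static inline uint32_t uframe_countdown_step(uint32_t countdown, uint32_t ucount) {
  const uint32_t dec = (ucount < countdown) ? ucount : countdown; // == tu_min32(ucount, countdown)
  return countdown - dec;
}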
