1
/* Read a 32-bit word from a DMA descriptor ring at byte offset `offset`.
 * The whole expansion is parenthesized so the macro acts as one expression:
 * without the outer parens, a postfix operator applied to the macro result
 * would bind to the inner cast expression instead of the dereferenced value
 * (CERT PRE02-C). Arguments remain individually parenthesized as before. */
#define NWL_RING_GET(data, offset) (*(uint32_t*)(((char*)(data)) + (offset)))
2
/* Store `val` as a 32-bit word into a DMA descriptor ring at byte offset
 * `offset`. Wrapped in parentheses so the assignment cannot fuse with
 * neighbouring operators when the macro is used inside a larger expression
 * (CERT PRE02-C); the expansion is otherwise unchanged. */
#define NWL_RING_SET(data, offset, val) (*(uint32_t*)(((char*)(data)) + (offset)) = (val))
3
/* Read-modify-write of a 32-bit ring word: keep the bits selected by `mask`,
 * OR in `val`, and write the result back to the same offset. Note `data` and
 * `offset` are evaluated twice — do not pass expressions with side effects.
 * Outer parentheses added so the expansion is a single expression
 * (CERT PRE02-C); the read-modify-write logic is unchanged. */
#define NWL_RING_UPDATE(data, offset, mask, val) (*(uint32_t*)(((char*)(data)) + (offset)) = ((*(uint32_t*)(((char*)(data)) + (offset)))&(mask))|(val))
5
// Re-derive the software ring indices for an S2C (host-to-device) engine from
// the hardware buffer-descriptor registers, used when re-attaching to a
// preserved DMA ring. info->head is taken from REG_SW_NEXT_BD and info->tail
// from REG_DMA_ENG_NEXT_BD; both must point inside the descriptor ring that
// starts at bus address ring_pa. Returns PCILIB_ERROR_INVALID_STATE if either
// register is below the ring start, misaligned, or out of range.
// NOTE(review): this listing appears truncated (declaration of `val`, several
// closing braces, the #ifdef DEBUG_NWL guard and the final return are not
// visible) — verify against the full source before editing.
static int dma_nwl_compute_read_s2c_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
8
char *base = info->base_addr;
10
// Next descriptor the software side will fill -> ring head.
nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
11
// Reject a pointer below the ring start or not descriptor-aligned.
if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
12
// NOTE(review): %lx / %zu below likely mismatch the (presumably 32-bit)
// register value — confirm pcilib_warning's printf-style contract.
if (val < ring_pa) pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_SW_NEXT_BD register value (%lx) is below start of ring [%lx,%lx])", val, ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
13
else pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu / %u) is fractal)", val - ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
14
return PCILIB_ERROR_INVALID_STATE;
17
// Convert register byte offset to a descriptor slot index.
info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
18
if (info->head >= PCILIB_NWL_DMA_PAGES) {
19
pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu) out of range)", info->head);
20
return PCILIB_ERROR_INVALID_STATE;
23
// Next descriptor the engine itself will process -> ring tail.
nwl_read_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
24
if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
25
if (val < ring_pa) pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%lx) is below start of ring [%lx,%lx])", val, ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
26
else pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%zu / %u) is fractal)", val - ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
27
return PCILIB_ERROR_INVALID_STATE;
30
info->tail = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
31
if (info->tail >= PCILIB_NWL_DMA_PAGES) {
32
pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%zu) out of range)", info->tail);
33
return PCILIB_ERROR_INVALID_STATE;
37
// Debug trace; the matching #ifdef DEBUG_NWL is not visible in this chunk.
printf("S2C: %lu %lu\n", info->tail, info->head);
38
#endif /* DEBUG_NWL */
43
// Re-derive ring indices for a C2S (device-to-host) engine when re-attaching
// to a preserved ring. Only the head is read from hardware (REG_SW_NEXT_BD);
// the tail is set to head+1 modulo the ring size, i.e. the engine owns all
// remaining descriptors in the receive direction. Returns
// PCILIB_ERROR_INVALID_STATE on an inconsistent register value.
// NOTE(review): listing appears truncated (declaration of `val`, closing
// braces, #ifdef DEBUG_NWL and the final return are not visible).
static int dma_nwl_compute_read_c2s_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
46
char *base = info->base_addr;
48
// Next descriptor software will reuse -> ring head.
nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
49
// Validate: inside the ring and descriptor-aligned.
if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
50
// NOTE(review): %lx / %zu likely mismatch a 32-bit register value — confirm.
if (val < ring_pa) pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%lx) is below start of the ring [%lx,%lx])", val, ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
51
else pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu / %u) is fractal)", val - ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
52
return PCILIB_ERROR_INVALID_STATE;
55
info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
56
if (info->head >= PCILIB_NWL_DMA_PAGES) {
57
pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu) out of range)", info->head);
58
return PCILIB_ERROR_INVALID_STATE;
61
// Tail trails the head by one slot, wrapping at the ring boundary.
info->tail = info->head + 1;
62
if (info->tail == PCILIB_NWL_DMA_PAGES) info->tail = 0;
65
// Debug trace; matching #ifdef DEBUG_NWL not visible in this chunk.
printf("C2S: %lu %lu\n", info->tail, info->head);
66
#endif /* DEBUG_NWL */
72
// Allocate (or re-attach to) the descriptor ring and data pages for one DMA
// engine: grabs kernel memory, checks whether persistent buffers from a
// previous run can be reused, recovers the head/tail pointers in that case,
// and otherwise (re)initializes every buffer descriptor in the ring.
// NOTE(review): this listing is truncated — declarations of `sub_use`, `err`,
// `preserve`, `i`, `buf_pa`, `buf_sz`, `val`, several braces and the
// allocation-failure check between the kmem calls are not visible. Verify
// against the full source before editing.
static int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
81
pcilib_kmem_reuse_state_t reuse_ring, reuse_pages;
82
pcilib_kmem_flags_t flags;
83
pcilib_kmem_type_t type;
85
char *base = info->base_addr;
87
// Already allocated — nothing to do.
if (info->pages) return 0;
89
// Or bidirectional specified by 0x0|addr, or read 0x0|addr and write 0x80|addr
90
type = (info->desc.direction == PCILIB_DMA_TO_DEVICE)?PCILIB_KMEM_TYPE_DMA_S2C_PAGE:PCILIB_KMEM_TYPE_DMA_C2S_PAGE;
91
// Sub-use tag distinguishes write (0x80) from read buffers for the same addr.
sub_use = info->desc.addr|((info->desc.direction == PCILIB_DMA_TO_DEVICE)?0x80:0x00);
92
// REUSE + EXCLUSIVE + HARDWARE; PERSISTENT only when preservation requested.
flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(info->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);
94
// One consistent block for the BD ring, plus one page per ring slot.
pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), flags);
95
pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, type, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), flags);
98
// Failure path: release whichever allocation succeeded.
// NOTE(review): the `if (!ring || !pages)` guard presumably preceding this
// block is not visible in this listing.
if (pages) pcilib_free_kernel_memory(ctx->pcilib, pages, 0);
99
if (ring) pcilib_free_kernel_memory(ctx->pcilib, ring, 0);
100
return PCILIB_ERROR_MEMORY;
103
reuse_ring = pcilib_kmem_is_reused(ctx->pcilib, ring);
104
reuse_pages = pcilib_kmem_is_reused(ctx->pcilib, pages);
106
// I guess idea here was that we not need to check all that stuff during the second iteration
107
// which is basicaly true (shall we expect any driver-triggered changes or parallel accesses?)
108
// but still we need to set preserve flag (and that if we enforcing preservation --start-dma).
109
// Probably having checks anyway is not harming...
110
// if (!info->preserve) {
111
// Ring and pages must be in the same reuse state to trust the old contents.
if (reuse_ring == reuse_pages) {
112
if (reuse_ring & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers are found (only part of required buffers is available), reinitializing...");
113
else if (reuse_ring & PCILIB_KMEM_REUSE_REUSED) {
114
if ((reuse_ring & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
115
else if ((reuse_ring & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
117
// Reused buffers are only trustworthy while the engine kept running.
nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
119
if ((val&DMA_ENG_RUNNING) == 0) pcilib_warning("Lost DMA buffers are found (DMA engine is stopped), reinitializing...");
123
} else pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers does not match), reinitializing....");
127
unsigned char *data = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, ring);
128
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, ring);
131
// When reusing, recover head/tail from hardware; on failure fall back to a
// full reinitialization (preserve cleared below).
if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) err = dma_nwl_compute_read_c2s_pointers(ctx, info, data, ring_pa);
132
else err = dma_nwl_compute_read_s2c_pointers(ctx, info, data, ring_pa);
134
if (err) preserve = 0;
139
buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, 0);
143
// Fresh initialization: zero the whole ring, then chain the descriptors.
memset(data, 0, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
145
for (i = 0; i < PCILIB_NWL_DMA_PAGES; i++, data += PCILIB_NWL_DMA_DESCRIPTOR_SIZE) {
146
buf_pa = pcilib_kmem_get_block_pa(ctx->pcilib, pages, i);
147
buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, i);
149
// Link each BD to the next (circular), then store the 64-bit page address.
NWL_RING_SET(data, DMA_BD_NDESC_OFFSET, ring_pa + ((i + 1) % PCILIB_NWL_DMA_PAGES) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
150
NWL_RING_SET(data, DMA_BD_BUFAL_OFFSET, buf_pa&0xFFFFFFFF);
151
NWL_RING_SET(data, DMA_BD_BUFAH_OFFSET, buf_pa>>32);
152
#ifdef NWL_GENERATE_DMA_IRQ
153
// Request completion/error interrupts per descriptor when IRQs are enabled.
NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
154
#else /* NWL_GENERATE_DMA_IRQ */
155
NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz);
156
#endif /* NWL_GENERATE_DMA_IRQ */
160
// Point both hardware and software BD pointers at the ring start.
nwl_write_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
161
nwl_write_register(val, ctx, base, REG_SW_NEXT_BD);
169
info->page_size = buf_sz;
170
info->ring_size = PCILIB_NWL_DMA_PAGES;
176
// Reclaim completed descriptors from the tail of the ring: walk from
// info->tail, checking each BD's status word, stopping at the first
// incomplete descriptor or when the tail catches up with the head.
// NOTE(review): this listing is truncated — the `next_buffer:` label, the
// declarations of `status`/`res`, tail advancement, error returns and the
// final return of the cleaned count are not visible.
static size_t dma_nwl_clean_buffers(nwl_dma_t * ctx, pcilib_nwl_engine_description_t *info) {
180
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
181
// Position at the oldest in-flight descriptor.
ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
184
status = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET)&DMA_BD_STATUS_MASK;
185
// control = NWL_RING_GET(ring, DMA_BD_BUFL_CTRL_OFFSET)&DMA_BD_CTRL_MASK;
187
if (status & DMA_BD_ERROR_MASK) {
188
pcilib_error("NWL DMA Engine reported error in ring descriptor");
192
if (status & DMA_BD_SHORT_MASK) {
193
pcilib_error("NWL DMA Engine reported short error");
197
// Completed descriptor: advance the tail pointer (with wrap-around).
if (status & DMA_BD_COMP_MASK) {
199
if (info->tail == info->ring_size) {
200
// Wrap: rewind the UA pointer back to the first descriptor.
ring -= (info->tail - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
203
ring += PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
208
// Keep cleaning until we reach the head (label defined outside this view).
if (info->tail != info->head) goto next_buffer;
211
// printf("====> Cleaned: %i\n", res);
216
// Wait (up to `timeout` microseconds, or forever for
// PCILIB_TIMEOUT_INFINITE) until at least `n_buffers` free descriptors are
// available starting at the ring head; returns the head index or
// PCILIB_DMA_BUFFER_INVALID on error/timeout.
// NOTE(review): truncated listing — declarations of `res`, `n`, `head`, the
// loop that recounts free slots inside the while body, and the final success
// return are not visible.
static size_t dma_nwl_get_next_buffer(nwl_dma_t * ctx, pcilib_nwl_engine_description_t *info, size_t n_buffers, pcilib_timeout_t timeout) {
217
struct timeval start, cur;
222
// Count free slots between head and tail (one slot is always kept unused).
for (head = info->head; (((head + 1)%info->ring_size) != info->tail)&&(n < n_buffers); head++, n++);
223
if (n == n_buffers) return info->head;
225
gettimeofday(&start, NULL);
227
// Try to free completed descriptors before starting the timed wait.
res = dma_nwl_clean_buffers(ctx, info);
228
if (res == (size_t)-1) return PCILIB_DMA_BUFFER_INVALID;
232
while (n < n_buffers) {
233
if (timeout != PCILIB_TIMEOUT_INFINITE) {
234
gettimeofday(&cur, NULL);
235
// Timeout is compared in microseconds.
if (((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) > timeout) break;
240
res = dma_nwl_clean_buffers(ctx, info);
241
if (res == (size_t)-1) return PCILIB_DMA_BUFFER_INVALID;
243
// Progress was made: restart the timeout window.
gettimeofday(&start, NULL);
248
if (n < n_buffers) return PCILIB_DMA_BUFFER_INVALID;
253
// Hand the descriptor at the ring head to the engine: set SOP on the first
// fragment of a packet, EOP when `eop` is requested, write size/flags into
// the BD, advance the head and publish it via REG_SW_NEXT_BD.
// NOTE(review): truncated listing — declarations of `flags`/`val`, the
// condition guarding the EOP flag, head increment, `info->writting` updates
// and the return statement are not visible. (`writting` is the field's
// spelling in the struct; do not "fix" it here.)
static int dma_nwl_push_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, size_t size, int eop, pcilib_timeout_t timeout) {
257
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
258
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
260
ring += info->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
263
// Not mid-packet yet: this descriptor starts a new packet.
if (!info->writting) {
264
flags |= DMA_BD_SOP_MASK;
268
flags |= DMA_BD_EOP_MASK;
272
NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, size|flags);
273
NWL_RING_SET(ring, DMA_BD_BUFL_STATUS_OFFSET, size);
276
// Wrap the head index at the end of the ring.
if (info->head == info->ring_size) info->head = 0;
278
// Publish the new head (bus address of the next free BD) to the engine.
val = ring_pa + info->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
279
nwl_write_register(val, ctx, info->base_addr, REG_SW_NEXT_BD);
285
// Poll the descriptor at the ring tail until it completes or `timeout`
// microseconds elapse; on completion reports the received byte count in
// *size and sets *eop when the descriptor closes a packet.
// NOTE(review): truncated listing — the `do {` opening the poll loop, the
// returns, and the declaration/meaning of `*mrd` (written below but not in
// the visible signature) are not visible; confirm against the full source.
static size_t dma_nwl_wait_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, size_t *size, int *eop, pcilib_timeout_t timeout) {
286
struct timeval start, cur;
287
uint32_t status_size, status;
289
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
291
ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
293
gettimeofday(&start, NULL);
296
// Status word holds both completion flags and the transferred length.
status_size = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
297
status = status_size & DMA_BD_STATUS_MASK;
299
if (status & DMA_BD_ERROR_MASK) {
300
pcilib_error("NWL DMA Engine reported error in ring descriptor");
304
if (status & DMA_BD_COMP_MASK) {
305
if (status & DMA_BD_EOP_MASK) *eop = 1;
308
*size = status_size & DMA_BD_BUFL_MASK;
312
// Peek at the following descriptor (with wrap) to see if more data is ready.
if ((info->tail + 1) == info->ring_size) ring -= info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
313
else ring += PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
314
*mrd = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET)&DMA_BD_COMP_MASK;
322
gettimeofday(&cur, NULL);
323
// Loop forever on infinite timeout, otherwise until the deadline (us).
} while ((timeout == PCILIB_TIMEOUT_INFINITE)||(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < timeout));
329
// This function is not used now, but we may need it in the future
330
// Heuristic overflow check: returns 1 if the descriptor just BEFORE the
// current tail is already marked complete, i.e. the engine has wrapped
// around and caught up with the software.
// NOTE(review): truncated listing — the declaration of `status` and the
// closing brace are not visible.
static int dma_nwl_is_overflown(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
332
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
333
// Step one descriptor back from the tail, wrapping to the last slot.
if (info->tail > 0) ring += (info->tail - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
334
else ring += (info->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
336
status = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
337
return status&DMA_BD_COMP_MASK?1:0;
341
// Return the descriptor at the ring tail to the engine after its data has
// been consumed: rewrite the control word with the buffer size (and IRQ
// flags when enabled), clear the status word, advance the software pointer
// via REG_SW_NEXT_BD, and wrap the tail index.
// NOTE(review): truncated listing — declaration of `val`, the tail increment
// and the return statement are not visible.
static int dma_nwl_return_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
344
unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
345
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
346
size_t bufsz = pcilib_kmem_get_block_size(ctx->pcilib, info->pages, info->tail);
348
ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
350
#ifdef NWL_GENERATE_DMA_IRQ
351
// Re-arm completion/error interrupts for this descriptor.
NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, bufsz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
352
#else /* NWL_GENERATE_DMA_IRQ */
353
NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, bufsz);
354
#endif /* NWL_GENERATE_DMA_IRQ */
356
// Clear status so the engine can report fresh completion info.
NWL_RING_SET(ring, DMA_BD_BUFL_STATUS_OFFSET, 0);
358
val = ring_pa + info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
359
nwl_write_register(val, ctx, info->base_addr, REG_SW_NEXT_BD);
362
// Wrap the tail index at the end of the ring.
if (info->tail == info->ring_size) info->tail = 0;
367
// Public status query for one DMA engine: fills `status` with ring geometry
// and head/tail positions, and (optionally) per-descriptor state into
// `buffers` for up to n_buffers entries.
// NOTE(review): this definition runs past the end of the visible chunk — the
// declarations of `i`, `pos`, `bstatus`, several braces and the final return
// are not visible; verify against the full source.
int dma_nwl_get_status(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_engine_status_t *status, size_t n_buffers, pcilib_dma_buffer_status_t *buffers) {
370
nwl_dma_t *ctx = (nwl_dma_t*)vctx;
371
pcilib_nwl_engine_description_t *info = ctx->engines + dma;
372
unsigned char *ring = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, info->ring);
375
// Status output is mandatory; buffers[] is optional.
if (!status) return -1;
377
status->started = info->started;
378
status->ring_size = info->ring_size;
379
status->buffer_size = info->page_size;
380
status->ring_tail = info->tail;
382
// For C2S the software head is not tracked directly: scan forward from the
// tail until the first descriptor the engine has not yet completed.
if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) {
384
for (i = 0; i < info->ring_size; i++) {
385
pos = status->ring_tail + i;
386
if (pos >= info->ring_size) pos -= info->ring_size;
388
bstatus = NWL_RING_GET(ring + pos * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, DMA_BD_BUFL_STATUS_OFFSET);
389
if ((bstatus&(DMA_BD_ERROR_MASK|DMA_BD_COMP_MASK)) == 0) break;
391
status->ring_head = pos;
393
// S2C: the driver tracks the head itself.
status->ring_head = info->head;
398
// Export per-descriptor flags for as many entries as the caller provided.
for (i = 0; (i < info->ring_size)&&(i < n_buffers); i++) {
399
bstatus = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
401
buffers[i].error = bstatus & (DMA_BD_ERROR_MASK/*|DMA_BD_SHORT_MASK*/);
402
buffers[i].used = bstatus & DMA_BD_COMP_MASK;
403
buffers[i].size = bstatus & DMA_BD_BUFL_MASK;
404
buffers[i].first = bstatus & DMA_BD_SOP_MASK;
405
buffers[i].last = bstatus & DMA_BD_EOP_MASK;
407
ring += PCILIB_NWL_DMA_DESCRIPTOR_SIZE;