author     Suren A. Chilingaryan <csa@dside.dyndns.org>   2011-07-17 03:37:29 +0200
committer  Suren A. Chilingaryan <csa@dside.dyndns.org>   2011-07-17 03:37:29 +0200
commit     112030c40f88dde281073e00e4c24cc48daa99d2 (patch)
tree       f6486582ee46a2b3b48bba5bc9d96c8432d0b7dc /dma
parent     7ac0539951ff0eba200e64b850b5181a82915c86 (diff)
Implement DMA access synchronization for NWL implementation
Diffstat (limited to 'dma')
-rw-r--r--   dma/nwl.c            4
-rw-r--r--   dma/nwl.h            1
-rw-r--r--   dma/nwl_buffers.h    157
-rw-r--r--   dma/nwl_engine.c     125
-rw-r--r--   dma/nwl_irq.c        4
5 files changed, 204 insertions(+), 87 deletions(-)
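The change teaches the NWL backend to notice when another process has already set up a DMA engine: the ring and page buffers are requested from the kernel module with reuse/persistence flags, the reported reuse state is cross-checked, and the existing descriptor ring is only adopted if both buffer sets are persistent, still referenced by hardware, and the engine is actually running. The sketch below condenses that decision flow for orientation only; dma_may_reuse_buffers() is an invented name, the real logic lives in dma_nwl_allocate_engine_buffers() in dma/nwl_buffers.h further down.

    /* Sketch only: condensed reuse check, using the pcilib kmem API visible in this diff.
     * dma_may_reuse_buffers() is a hypothetical helper, not part of the commit. */
    static int dma_may_reuse_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info,
                                     pcilib_kmem_handle_t *ring, pcilib_kmem_handle_t *pages) {
        uint32_t val;
        pcilib_kmem_reuse_state_t r = pcilib_kmem_is_reused(ctx->pcilib, ring);
        pcilib_kmem_reuse_state_t p = pcilib_kmem_is_reused(ctx->pcilib, pages);

        if (r != p) return 0;                               /* ring and pages disagree: reinitialize */
        if (r & PCILIB_KMEM_REUSE_PARTIAL) return 0;        /* only part of the buffers survived */
        if (!(r & PCILIB_KMEM_REUSE_REUSED)) return 0;      /* fresh allocation, nothing to adopt */
        if (!(r & PCILIB_KMEM_REUSE_PERSISTENT)) return 0;  /* previous owner was not persistent */
        if (!(r & PCILIB_KMEM_REUSE_HARDWARE)) return 0;    /* no hardware reference left behind */

        nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
        return (val & DMA_ENG_RUNNING) != 0;                /* adopt only while the engine still runs */
    }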
diff --git a/dma/nwl.c b/dma/nwl.c
index 78a587e..0a41703 100644
--- a/dma/nwl.c
+++ b/dma/nwl.c
@@ -41,7 +41,7 @@ int dma_nwl_stop(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma
// stop everything
if (dma == PCILIB_DMA_ENGINE_INVALID) {
for (dma = 0; dma < ctx->n_engines; dma++) {
- if (flags&PCILIB_DMA_FLAG_PERMANENT) {
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) {
ctx->engines[dma].preserve = 0;
}
@@ -62,7 +62,7 @@ int dma_nwl_stop(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma
if (dma > ctx->n_engines) return PCILIB_ERROR_INVALID_BANK;
// ignoring previous setting if flag specified
- if (flags&PCILIB_DMA_FLAG_PERMANENT) {
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) {
ctx->engines[dma].preserve = 0;
}
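The rename from PCILIB_DMA_FLAG_PERMANENT to PCILIB_DMA_FLAG_PERSISTENT does not change the behaviour of dma_nwl_stop(): without the flag, engines whose preserve mark is set are left configured for the next user; with the flag, the mark is cleared first so everything is really shut down. A hedged usage sketch (vctx stands for a pcilib_dma_context_t* obtained elsewhere; it is assumed, not shown in this diff):

    /* Plain stop: engines marked preserve stay configured for the next process. */
    dma_nwl_stop(vctx, PCILIB_DMA_ENGINE_INVALID, 0);

    /* Persistent stop: the preserve mark is cleared first, so even engines
     * left running by a previous process are shut down. */
    dma_nwl_stop(vctx, PCILIB_DMA_ENGINE_INVALID, PCILIB_DMA_FLAG_PERSISTENT);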
diff --git a/dma/nwl.h b/dma/nwl.h
index 7a04dfa..8197bf9 100644
--- a/dma/nwl.h
+++ b/dma/nwl.h
@@ -38,6 +38,7 @@ struct pcilib_nwl_engine_description_s {
int started; /**< indicates that DMA buffers are initialized and reading is allowed */
int writting; /**< indicates that we are in the middle of writing a packet */
+ int reused; /**< indicates that DMA was found initialized, buffers were reused, and no additional initialization is needed */
int preserve; /**< indicates that DMA should not be stopped during clean-up */
};
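The new reused flag completes the per-engine state machine: buffer allocation sets it when an existing descriptor ring is adopted, engine start-up then promotes it to preserve so clean-up leaves the engine alone, and only an explicit stop with PCILIB_DMA_FLAG_PERSISTENT clears the preservation again. A rough lifecycle sketch, assembled from the hunks of this commit rather than copied from any one of them; buffers_were_adopted is an illustrative placeholder:

    if (buffers_were_adopted)                  /* dma_nwl_allocate_engine_buffers() */
        info->reused = 1;

    if (info->reused)                          /* dma_nwl_start_engine() */
        info->preserve = 1;

    if (flags & PCILIB_DMA_FLAG_PERSISTENT)    /* dma_nwl_stop() */
        info->preserve = 0;

    if (!info->preserve)                       /* clean-up path */
        dma_nwl_stop_engine(ctx, dma);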
diff --git a/dma/nwl_buffers.h b/dma/nwl_buffers.h
index a38af8c..9e60461 100644
--- a/dma/nwl_buffers.h
+++ b/dma/nwl_buffers.h
@@ -13,15 +13,101 @@ int dma_nwl_sync_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info,
return 0;
}
-int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
+static int dma_nwl_compute_read_s2c_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
+ size_t pos;
+ uint32_t val;
+
+ char *base = info->base_addr;
+
+ nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
+ if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_SW_NEXT_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
+ if (info->head >= PCILIB_NWL_DMA_PAGES) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_SW_NEXT_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ nwl_read_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
+ if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ info->tail = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
+ if (info->tail >= PCILIB_NWL_DMA_PAGES) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ return 0;
+}
+
+static int dma_nwl_compute_read_c2s_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
+ size_t pos;
+ uint32_t val;
+ size_t prev;
+
+ char *base = info->base_addr;
+
+
+ nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
+ if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_SW_NEXT_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
+ if (info->head >= PCILIB_NWL_DMA_PAGES) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_SW_NEXT_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ info->tail = info->head;
+
+
+ // Last read BD
+ nwl_read_register(val, ctx, base, REG_DMA_ENG_LAST_BD);
+ if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_DMA_ENG_LAST_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+ prev = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
+ if (prev >= PCILIB_NWL_DMA_PAGES) {
+ pcilib_warning("Inconsistent DMA Ring buffer is found (REG_DMA_ENG_LAST_BD register out of range)");
+ return PCILIB_ERROR_INVALID_STATE;
+ }
+
+prev_buffer:
+ val = NWL_RING_GET(ring + prev * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, DMA_BD_BUFL_STATUS_OFFSET)&DMA_BD_STATUS_MASK;
+
+ if (val & (DMA_BD_ERROR_MASK|DMA_BD_COMP_MASK)) {
+ info->tail = prev;
+
+ if (prev > 0) prev -= 1;
+ else prev = PCILIB_NWL_DMA_PAGES - 1;
+
+ if (prev != info->head) goto prev_buffer;
+ }
+
+ return 0;
+}
+
+
+static int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
int err = 0;
int i;
+ int preserve = 0;
uint16_t sub_use;
uint32_t val;
uint32_t buf_sz;
uint64_t buf_pa;
- pcilib_kmem_reuse_t reuse_ring, reuse_pages;
+ pcilib_kmem_reuse_state_t reuse_ring, reuse_pages;
pcilib_kmem_flags_t flags;
char *base = info->base_addr;
@@ -30,7 +116,7 @@ int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_descriptio
// Or bidirectional specified by 0x0|addr, or read 0x0|addr and write 0x80|addr
sub_use = info->desc.addr|(info->desc.direction == PCILIB_DMA_TO_DEVICE)?0x80:0x00;
- flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|info->preserve?PCILIB_KMEM_FLAG_PRESERVE:0;
+ flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|info->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0;
pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), flags);
pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_PAGE, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), flags);
@@ -43,44 +129,67 @@ int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_descriptio
return err;
}
-
-/*
reuse_ring = pcilib_kmem_is_reused(ctx->pcilib, ring);
reuse_pages = pcilib_kmem_is_reused(ctx->pcilib, pages);
- if ((reuse_ring == PCILIB_KMEM_REUSE_REUSED)&&(reuse_pages == PCILIB_KMEM_REUSE_REUSED)) info->preserve = 1;
- else if (reuse_ring||reuse_pages) pcilib_warning("Inconsistent buffers in the kernel module are detected");
-*/
+
+ if (!info->preserve) {
+ if (reuse_ring == reuse_pages) {
+ if (reuse_ring & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers are found (only part of required buffers is available), reinitializing...");
+ else if (reuse_ring & PCILIB_KMEM_REUSE_REUSED) {
+ if (reuse_ring & PCILIB_KMEM_REUSE_PERSISTENT == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
+ else if (reuse_ring & PCILIB_KMEM_REUSE_HARDWARE == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
+ else {
+ nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
+ if (val&DMA_ENG_RUNNING == 0) pcilib_warning("Lost DMA buffers are found (DMA engine is stopped), reinitializing...");
+ else preserve = 1;
+ }
+ }
+ } else pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers do not match), reinitializing...");
+ }
+
unsigned char *data = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, ring);
uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, ring);
-
- memset(data, 0, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
- for (i = 0; i < PCILIB_NWL_DMA_PAGES; i++, data += PCILIB_NWL_DMA_DESCRIPTOR_SIZE) {
- buf_pa = pcilib_kmem_get_block_pa(ctx->pcilib, pages, i);
- buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, i);
+ if (preserve) {
+ if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) err = dma_nwl_compute_read_c2s_pointers(ctx, info, data, ring_pa);
+ else err = dma_nwl_compute_read_s2c_pointers(ctx, info, data, ring_pa);
- NWL_RING_SET(data, DMA_BD_NDESC_OFFSET, ring_pa + ((i + 1) % PCILIB_NWL_DMA_PAGES) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
- NWL_RING_SET(data, DMA_BD_BUFAL_OFFSET, buf_pa&0xFFFFFFFF);
- NWL_RING_SET(data, DMA_BD_BUFAH_OFFSET, buf_pa>>32);
+ if (err) preserve = 0;
+ }
+
+ if (preserve)
+ info->reused = 1;
+ else {
+ memset(data, 0, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
+
+ for (i = 0; i < PCILIB_NWL_DMA_PAGES; i++, data += PCILIB_NWL_DMA_DESCRIPTOR_SIZE) {
+ buf_pa = pcilib_kmem_get_block_pa(ctx->pcilib, pages, i);
+ buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, i);
+
+ NWL_RING_SET(data, DMA_BD_NDESC_OFFSET, ring_pa + ((i + 1) % PCILIB_NWL_DMA_PAGES) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
+ NWL_RING_SET(data, DMA_BD_BUFAL_OFFSET, buf_pa&0xFFFFFFFF);
+ NWL_RING_SET(data, DMA_BD_BUFAH_OFFSET, buf_pa>>32);
#ifdef NWL_GENERATE_DMA_IRQ
- NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
+ NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
#else /* NWL_GENERATE_DMA_IRQ */
- NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz);
+ NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz);
#endif /* NWL_GENERATE_DMA_IRQ */
- }
+ }
+
+ val = ring_pa;
+ nwl_write_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
+ nwl_write_register(val, ctx, base, REG_SW_NEXT_BD);
- val = ring_pa;
- nwl_write_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
- nwl_write_register(val, ctx, base, REG_SW_NEXT_BD);
+ info->head = 0;
+ info->tail = 0;
+ }
info->ring = ring;
info->pages = pages;
info->page_size = buf_sz;
info->ring_size = PCILIB_NWL_DMA_PAGES;
- info->head = 0;
- info->tail = 0;
return 0;
}
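Both pointer-recovery helpers above rely on the same conversion: a buffer-descriptor address read back from the engine must lie inside the ring, be aligned to the descriptor size, and map to an index below PCILIB_NWL_DMA_PAGES, otherwise the ring is declared inconsistent. For the C2S (read) direction the tail is then found by walking backwards from REG_DMA_ENG_LAST_BD while the descriptors still carry completion or error status. A stand-alone sketch of both steps; bd_to_index() and find_c2s_tail() are invented names:

    /* Sketch: BD physical address -> ring index, as validated in the helpers above.
     * Returns -1 when the ring looks inconsistent. */
    static int bd_to_index(uint32_t bd_pa, uint32_t ring_pa) {
        if (bd_pa < ring_pa) return -1;
        if ((bd_pa - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE) return -1;   /* not descriptor-aligned */
        uint32_t idx = (bd_pa - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
        return (idx < PCILIB_NWL_DMA_PAGES) ? (int)idx : -1;                 /* outside the ring */
    }

    /* Sketch: backward walk used for the C2S tail, wrapping at index 0. */
    static size_t find_c2s_tail(unsigned char *ring, size_t head, size_t last_bd) {
        size_t tail = head, prev = last_bd;
        for (;;) {
            uint32_t st = NWL_RING_GET(ring + prev * PCILIB_NWL_DMA_DESCRIPTOR_SIZE,
                                       DMA_BD_BUFL_STATUS_OFFSET) & DMA_BD_STATUS_MASK;
            if (!(st & (DMA_BD_ERROR_MASK|DMA_BD_COMP_MASK))) break;   /* descriptor not completed */
            tail = prev;
            prev = prev ? prev - 1 : PCILIB_NWL_DMA_PAGES - 1;
            if (prev == head) break;                                   /* whole ring scanned */
        }
        return tail;
    }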
diff --git a/dma/nwl_engine.c b/dma/nwl_engine.c
index 59e9b93..58d5522 100644
--- a/dma/nwl_engine.c
+++ b/dma/nwl_engine.c
@@ -65,88 +65,89 @@ int dma_nwl_start_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
if (info->started) return 0;
+
// This will only succeed if there is no parallel access to the DMA engine
err = dma_nwl_allocate_engine_buffers(ctx, info);
if (err) return err;
- // Check if DMA engine is enabled
- nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
- if (val&DMA_ENG_RUNNING) {
-// info->preserve = 1;
-
- // We need to positionate buffers correctly (both read and write)
- //DSS info->tail, info->head
-
-// pcilib_error("Not implemented");
-
-// info->started = 1;
-// return 0;
- }
+ if (info->reused) {
+ info->preserve = 1;
+
+ // Acknowledge asserted engine interrupts
+ if (val & DMA_ENG_INT_ACTIVE_MASK) {
+ val |= DMA_ENG_ALLINT_MASK;
+ nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ }
+#ifdef NWL_GENERATE_DMA_IRQ
+ dma_nwl_enable_engine_irq(ctx, dma);
+#endif /* NWL_GENERATE_DMA_IRQ */
+ } else {
// Disable IRQs
- err = dma_nwl_disable_engine_irq(ctx, dma);
- if (err) return err;
+ err = dma_nwl_disable_engine_irq(ctx, dma);
+ if (err) return err;
// Disable Engine & Resetting
- val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET;
- nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET;
+ nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
- gettimeofday(&start, NULL);
- do {
- nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
- gettimeofday(&cur, NULL);
- } while ((val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));
+ gettimeofday(&start, NULL);
+ do {
+ nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ gettimeofday(&cur, NULL);
+ } while ((val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));
- if (val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET)) {
- pcilib_error("Timeout during reset of DMA engine %i", info->desc.addr);
- return PCILIB_ERROR_TIMEOUT;
- }
+ if (val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET)) {
+ pcilib_error("Timeout during reset of DMA engine %i", info->desc.addr);
+ return PCILIB_ERROR_TIMEOUT;
+ }
- val = DMA_ENG_RESET;
- nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ val = DMA_ENG_RESET;
+ nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
- gettimeofday(&start, NULL);
- do {
- nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
- gettimeofday(&cur, NULL);
- } while ((val & DMA_ENG_RESET)&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));
+ gettimeofday(&start, NULL);
+ do {
+ nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ gettimeofday(&cur, NULL);
+ } while ((val & DMA_ENG_RESET)&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));
- if (val & DMA_ENG_RESET) {
- pcilib_error("Timeout during reset of DMA engine %i", info->desc.addr);
- return PCILIB_ERROR_TIMEOUT;
- }
+ if (val & DMA_ENG_RESET) {
+ pcilib_error("Timeout during reset of DMA engine %i", info->desc.addr);
+ return PCILIB_ERROR_TIMEOUT;
+ }
- // Acknowledge asserted engine interrupts
- if (val & DMA_ENG_INT_ACTIVE_MASK) {
- val |= DMA_ENG_ALLINT_MASK;
- nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
- }
+ // Acknowledge asserted engine interrupts
+ if (val & DMA_ENG_INT_ACTIVE_MASK) {
+ val |= DMA_ENG_ALLINT_MASK;
+ nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ }
- ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
- nwl_write_register(ring_pa, ctx, info->base_addr, REG_DMA_ENG_NEXT_BD);
- nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);
+ ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
+ nwl_write_register(ring_pa, ctx, info->base_addr, REG_DMA_ENG_NEXT_BD);
+ nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);
- __sync_synchronize();
+ __sync_synchronize();
- nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
- val |= (DMA_ENG_ENABLE);
- nwl_write_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
+ nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
+ val |= (DMA_ENG_ENABLE);
+ nwl_write_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
- __sync_synchronize();
+ __sync_synchronize();
#ifdef NWL_GENERATE_DMA_IRQ
- dma_nwl_enable_engine_irq(ctx, dma);
+ dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */
- if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) {
- ring_pa += (info->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
- nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);
+ if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) {
+ ring_pa += (info->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
+ nwl_write_register(ring_pa, ctx, info->base_addr, REG_SW_NEXT_BD);
- info->tail = 0;
- info->head = (info->ring_size - 1);
- } else {
- info->tail = 0;
- info->head = 0;
+ info->tail = 0;
+ info->head = (info->ring_size - 1);
+ } else {
+ info->tail = 0;
+ info->head = 0;
+ }
}
info->started = 1;
@@ -174,6 +175,12 @@ int dma_nwl_stop_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET|DMA_ENG_RESET;
nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ gettimeofday(&start, NULL);
+ do {
+ nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
+ gettimeofday(&cur, NULL);
+ } while ((val & (DMA_ENG_RUNNING))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));
+
if (info->ring) {
ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
nwl_write_register(ring_pa, ctx, info->base_addr, REG_DMA_ENG_NEXT_BD);
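dma_nwl_stop_engine() now waits for the engine to actually leave the running state after the reset bits are asserted, using the same gettimeofday()-based busy-wait bounded by PCILIB_REGISTER_TIMEOUT that the start path uses. The recurring idiom, extracted into a hypothetical helper for readability (the real code inlines the loop each time):

    #include <sys/time.h>

    /* Sketch: poll a status register until the given bits clear or the timeout expires.
     * wait_register_cleared() is an invented name; nwl_read_register() is the macro
     * used throughout dma/nwl_engine.c. */
    static int wait_register_cleared(nwl_dma_t *ctx, char *base, unsigned reg, uint32_t mask) {
        uint32_t val;
        struct timeval start, cur;

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, reg);
            gettimeofday(&cur, NULL);
        } while ((val & mask) &&
                 (((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

        return (val & mask) ? PCILIB_ERROR_TIMEOUT : 0;
    }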
diff --git a/dma/nwl_irq.c b/dma/nwl_irq.c
index 3a9d9a2..86f1845 100644
--- a/dma/nwl_irq.c
+++ b/dma/nwl_irq.c
@@ -37,7 +37,7 @@ int dma_nwl_enable_irq(pcilib_dma_context_t *vctx, pcilib_irq_type_t type, pcili
uint32_t val;
nwl_dma_t *ctx = (nwl_dma_t*)vctx;
- if (flags&PCILIB_DMA_FLAG_PERMANENT) ctx->irq_preserve |= type;
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) ctx->irq_preserve |= type;
if ((ctx->irq_enabled&type) == type) return 0;
@@ -72,7 +72,7 @@ int dma_nwl_disable_irq(pcilib_dma_context_t *vctx, pcilib_dma_flags_t flags) {
val &= ~(DMA_INT_ENABLE|DMA_USER_INT_ENABLE);
nwl_write_register(val, ctx, ctx->base_addr, REG_DMA_CTRL_STATUS);
- if (flags&PCILIB_DMA_FLAG_PERMANENT) ctx->irq_preserve = 0;
+ if (flags&PCILIB_DMA_FLAG_PERSISTENT) ctx->irq_preserve = 0;
return 0;
}