Commit 4a13da1f authored by Carsten Heinz's avatar Carsten Heinz
Browse files

Fix logging formatting for 32bit systems

parent ba1ff11a
Pipeline #572 failed with stage
in 26 minutes and 5 seconds
......@@ -70,7 +70,7 @@ int blue_dma_init(struct dma_engine *dma) {
ssize_t blue_dma_copy_from(struct dma_engine *dma, void *dma_handle, dev_addr_t dev_addr, size_t len)
{
dma_addr_t handle = (dma_addr_t)dma_handle;
DEVLOG(dma->dev_id, TLKM_LF_DMA, "dev_addr = 0x%px, dma_handle = 0x%llx, len: %zu bytes", (void *)dev_addr, handle, len);
DEVLOG(dma->dev_id, TLKM_LF_DMA, "dev_addr = 0x%p, dma_handle = 0x%p, len: %zu bytes", (void *)dev_addr, dma_handle, len);
if (mutex_lock_interruptible(&dma->regs_mutex)) {
WRN("got killed while aquiring the mutex");
return len;
......@@ -88,7 +88,7 @@ ssize_t blue_dma_copy_from(struct dma_engine *dma, void *dma_handle, dev_addr_t
ssize_t blue_dma_copy_to(struct dma_engine *dma, dev_addr_t dev_addr, const void *dma_handle, size_t len)
{
dma_addr_t handle = (dma_addr_t)dma_handle;
DEVLOG(dma->dev_id, TLKM_LF_DMA, "dev_addr = 0x%px, dma_handle = 0x%llx, len: %zu bytes", (void *)dev_addr, handle, len);
DEVLOG(dma->dev_id, TLKM_LF_DMA, "dev_addr = 0x%px, dma_handle = 0x%p, len: %zu bytes", (void *)dev_addr, dma_handle, len);
if (mutex_lock_interruptible(&dma->regs_mutex)) {
WRN("got killed while aquiring the mutex");
return len;
......
......@@ -62,7 +62,7 @@ int tlkm_dma_init(struct tlkm_device *dev, struct dma_engine *dma, u64 dbase)
DEVLOG(dev_id, TLKM_LF_DMA, "I/O remapping 0x%px - 0x%px...", base, base + DMA_SZ - 1);
dma->regs = ioremap_nocache((resource_size_t)base, DMA_SZ);
if (dma->regs == 0 || IS_ERR(dma->regs)) {
DEVERR(dev_id, "failed to map 0x%px - 0x%px: %ld", base, base + DMA_SZ - 1, PTR_ERR(dma->regs));
DEVERR(dev_id, "failed to map 0x%p - 0x%p: %lx", base, base + DMA_SZ - 1, PTR_ERR(dma->regs));
ret = EIO;
goto err_dma_ioremap;
}
......@@ -159,7 +159,7 @@ ssize_t tlkm_dma_copy_to(struct dma_engine *dma, dev_addr_t dev_addr, const void
while (len > 0) {
DEVLOG(dma->dev_id, TLKM_LF_DMA, "outstanding bytes: %zd - usr_addr = 0x%px, dev_addr = 0x%px",
len, usr_addr, (void *)dev_addr);
DEVLOG(dma->dev_id, TLKM_LF_DMA, "using buffer: %d and waiting for t_id == %ld", current_buffer, t_ids[current_buffer]);
DEVLOG(dma->dev_id, TLKM_LF_DMA, "using buffer: %d and waiting for t_id == %zd", current_buffer, t_ids[current_buffer]);
cpy_sz = len < TLKM_DMA_CHUNK_SZ ? len : TLKM_DMA_CHUNK_SZ;
if (wait_event_interruptible(dma->wq, atomic64_read(&dma->wq_processed) >= t_ids[current_buffer])) {
DEVWRN(dma->dev_id, "got killed while hanging in waiting queue");
......@@ -215,7 +215,7 @@ ssize_t tlkm_dma_copy_from(struct dma_engine *dma, void __user *usr_addr, dev_ad
while (len > 0) {
DEVLOG(dma->dev_id, TLKM_LF_DMA, "outstanding bytes: %zd - usr_addr = 0x%px, dev_addr = 0x%px",
len, usr_addr, (void *)dev_addr);
DEVLOG(dma->dev_id, TLKM_LF_DMA, "using buffer: %d and waiting for t_id == %ld", current_buffer, chunks[current_buffer].t_id);
DEVLOG(dma->dev_id, TLKM_LF_DMA, "using buffer: %d and waiting for t_id == %zd", current_buffer, chunks[current_buffer].t_id);
if (wait_event_interruptible(dma->rq, atomic64_read(&dma->rq_processed) >= chunks[current_buffer].t_id)) {
DEVWRN(dma->dev_id, "got killed while hanging in waiting queue");
return -EACCES;
......
......@@ -51,12 +51,12 @@ static int claim_device(struct tlkm_pcie_device *pdev)
pdev->phy_len_bar0 = pci_resource_len(dev, 0);
pdev->phy_flags_bar0 = pci_resource_flags(dev, 0);
DEVLOG(did, TLKM_LF_PCIE, "PCI bar 0: address= 0x%08llx length: 0x%08llx",
pdev->phy_addr_bar0, pdev->phy_len_bar0);
DEVLOG(did, TLKM_LF_PCIE, "PCI bar 0: address= 0x%zx length: 0x%zx",
(size_t) pdev->phy_addr_bar0, (size_t) pdev->phy_len_bar0);
pdev->parent->base_offset = pdev->phy_addr_bar0;
DEVLOG(did, TLKM_LF_PCIE, "status core base: 0x%08llx => 0x%08llx",
(u64)pcie_cls.platform.status.base, (u64)pcie_cls.platform.status.base + pdev->parent->base_offset);
DEVLOG(did, TLKM_LF_PCIE, "status core base: 0x%8p => 0x%8p",
(void*) pcie_cls.platform.status.base, (void*) pcie_cls.platform.status.base + pdev->parent->base_offset);
return 0;
......@@ -278,7 +278,7 @@ int pcie_device_dma_allocate_buffer(dev_id_t dev_id, struct tlkm_device *dev, vo
dma_addr_t *handle = (dma_addr_t*)dev_handle;
int err = 0;
*buffer = kmalloc(size, 0);
DEVLOG(dev_id, TLKM_LF_DEVICE, "Allocated %ld bytes at kernel address %p trying to map into DMA space...", size, *buffer);
DEVLOG(dev_id, TLKM_LF_DEVICE, "Allocated %zd bytes at kernel address %p trying to map into DMA space...", size, *buffer);
if (*buffer) {
memset(*buffer, 0, size);
*handle = dma_map_single(&pdev->pdev->dev, *buffer, size, direction == FROM_DEV ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
......@@ -291,7 +291,7 @@ int pcie_device_dma_allocate_buffer(dev_id_t dev_id, struct tlkm_device *dev, vo
err = -EFAULT;
}
DEVLOG(dev_id, TLKM_LF_DEVICE, "Mapped buffer to device address %llx", *handle);
DEVLOG(dev_id, TLKM_LF_DEVICE, "Mapped buffer to device address %p", (void*) *handle);
return err;
}
......@@ -300,7 +300,7 @@ void pcie_device_dma_free_buffer(dev_id_t dev_id, struct tlkm_device *dev, void*
{
struct tlkm_pcie_device *pdev = (struct tlkm_pcie_device *)dev->private_data;
dma_addr_t *handle = (dma_addr_t*)dev_handle;
DEVLOG(dev_id, TLKM_LF_DEVICE, "Mapped buffer to device address %llx", *handle);
DEVLOG(dev_id, TLKM_LF_DEVICE, "Mapped buffer to device address %p", (void*) *handle);
if (*handle) {
dma_unmap_single(&pdev->pdev->dev, *handle, size, direction == FROM_DEV ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
*handle = 0;
......@@ -314,7 +314,7 @@ void pcie_device_dma_free_buffer(dev_id_t dev_id, struct tlkm_device *dev, void*
/* Sync a streaming DMA buffer for CPU access.
 * Transfers ownership of the mapping back to the CPU so the kernel may read
 * (FROM_DEV) or prepare (TO_DEV) the buffer contents.
 * @dev_id     device id, used only for logging
 * @dev        TLKM device; private_data must hold a struct tlkm_pcie_device
 * @buffer     kernel-virtual buffer address (unused here)
 * @dev_handle points at the dma_addr_t handle of the mapping
 * @direction  FROM_DEV -> DMA_FROM_DEVICE, otherwise DMA_TO_DEVICE
 * @size       size of the mapped region in bytes
 * Returns 0 unconditionally.
 */
inline int pcie_device_dma_sync_buffer_cpu(dev_id_t dev_id, struct tlkm_device *dev, void** buffer, void **dev_handle, dma_direction_t direction, size_t size) {
	struct tlkm_pcie_device *pdev = (struct tlkm_pcie_device *)dev->private_data;
	dma_addr_t *handle = (dma_addr_t*)dev_handle;
	/* Log via %p on the void* handle: dma_addr_t may be 32 or 64 bit
	 * depending on the platform, so %llx would be wrong on 32-bit. */
	DEVLOG(dev_id, TLKM_LF_DEVICE, "Mapping buffer %p for cpu", *dev_handle);
	dma_sync_single_for_cpu(&pdev->pdev->dev, *handle, size, direction == FROM_DEV ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	return 0;
}
......@@ -322,7 +322,7 @@ inline int pcie_device_dma_sync_buffer_cpu(dev_id_t dev_id, struct tlkm_device *
/* Sync a streaming DMA buffer for device access.
 * Hands ownership of the mapping to the device after the CPU has finished
 * touching the buffer, so the device sees coherent data.
 * @dev_id     device id, used only for logging
 * @dev        TLKM device; private_data must hold a struct tlkm_pcie_device
 * @buffer     kernel-virtual buffer address (unused here)
 * @dev_handle points at the dma_addr_t handle of the mapping
 * @direction  FROM_DEV -> DMA_FROM_DEVICE, otherwise DMA_TO_DEVICE
 * @size       size of the mapped region in bytes
 * Returns 0 unconditionally.
 */
inline int pcie_device_dma_sync_buffer_dev(dev_id_t dev_id, struct tlkm_device *dev, void** buffer, void **dev_handle, dma_direction_t direction, size_t size) {
	struct tlkm_pcie_device *pdev = (struct tlkm_pcie_device *)dev->private_data;
	dma_addr_t *handle = (dma_addr_t*)dev_handle;
	/* Log via %p on the void* handle: dma_addr_t may be 32 or 64 bit
	 * depending on the platform, so %llx would be wrong on 32-bit. */
	DEVLOG(dev_id, TLKM_LF_DEVICE, "Mapping buffer %p for device", *dev_handle);
	dma_sync_single_for_device(&pdev->pdev->dev, *handle, size, direction == FROM_DEV ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	return 0;
}
\ No newline at end of file
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment