// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"

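/* Fill in the chain geometry and bookkeeping fields (elements per page,
 * usable elements, masks, capacity) from the init parameters, and adopt an
 * external PBL if one was supplied. No DMA memory is allocated here.
 */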
static void qed_chain_init(struct qed_chain *chain,
                           const struct qed_chain_init_params *params,
                           u32 page_cnt)
{
        memset(chain, 0, sizeof(*chain));

        chain->elem_size = params->elem_size;
        chain->intended_use = params->intended_use;
        chain->mode = params->mode;
        chain->cnt_type = params->cnt_type;

        chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
                                              params->page_size);
        chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
                                                       params->page_size,
                                                       params->mode);
        chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
                                                       params->mode);

        chain->elem_per_page_mask = chain->elem_per_page - 1;
        chain->next_page_mask = chain->usable_per_page &
                                chain->elem_per_page_mask;

        chain->page_size = params->page_size;
        chain->page_cnt = page_cnt;
        chain->capacity = chain->usable_per_page * page_cnt;
        chain->size = chain->elem_per_page * page_cnt;

        if (params->ext_pbl_virt) {
                chain->pbl_sp.table_virt = params->ext_pbl_virt;
                chain->pbl_sp.table_phys = params->ext_pbl_phys;

                chain->b_external_pbl = true;
        }
}

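/* Initialize the next-ptr element at the end of the page at @virt_curr so
 * that it points to the page at @virt_next / @phys_next.
 */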
static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
                                         void *virt_curr, void *virt_next,
                                         dma_addr_t phys_next)
{
        struct qed_chain_next *next;
        u32 size;

        size = chain->elem_size * chain->usable_per_page;
        next = virt_curr + size;

        DMA_REGPAIR_LE(next->next_phys, phys_next);
        next->next_virt = virt_next;
}

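/* Record the address of the chain's first page (NULL/0 clears it). */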
static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
                               dma_addr_t phys_addr)
{
        chain->p_virt_addr = virt_addr;
        chain->p_phys_addr = phys_addr;
}

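/* Free a next-ptr chain page by page, reading each page's link to the
 * following page before freeing it, since the link lives inside the page
 * being freed.
 */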
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        struct qed_chain_next *next;
        dma_addr_t phys, phys_next;
        void *virt, *virt_next;
        u32 size, i;

        size = chain->elem_size * chain->usable_per_page;
        virt = chain->p_virt_addr;
        phys = chain->p_phys_addr;

        for (i = 0; i < chain->page_cnt; i++) {
                if (!virt)
                        break;

                next = virt + size;
                virt_next = next->next_virt;
                phys_next = HILO_DMA_REGPAIR(next->next_phys);

                dma_free_coherent(dev, chain->page_size, virt, phys);

                virt = virt_next;
                phys = phys_next;
        }
}

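/* A single-mode chain owns exactly one coherent DMA page. */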
static void qed_chain_free_single(struct qed_dev *cdev,
                                  struct qed_chain *chain)
{
        if (!chain->p_virt_addr)
                return;

        dma_free_coherent(&cdev->pdev->dev, chain->page_size,
                          chain->p_virt_addr, chain->p_phys_addr);
}

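/* Free every data page recorded in the address table, then the PBL table
 * itself (unless it was supplied externally), and finally the address table.
 */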
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        struct addr_tbl_entry *entry;
        u32 i;

        if (!chain->pbl.pp_addr_tbl)
                return;

        for (i = 0; i < chain->page_cnt; i++) {
                entry = chain->pbl.pp_addr_tbl + i;
                if (!entry->virt_addr)
                        break;

                dma_free_coherent(dev, chain->page_size, entry->virt_addr,
                                  entry->dma_map);
        }

        if (!chain->b_external_pbl)
                dma_free_coherent(dev, chain->pbl_sp.table_size,
                                  chain->pbl_sp.table_virt,
                                  chain->pbl_sp.table_phys);

        vfree(chain->pbl.pp_addr_tbl);
        chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
        switch (chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                qed_chain_free_next_ptr(cdev, chain);
                break;
        case QED_CHAIN_MODE_SINGLE:
                qed_chain_free_single(cdev, chain);
                break;
        case QED_CHAIN_MODE_PBL:
                qed_chain_free_pbl(cdev, chain);
                break;
        default:
                return;
        }

        qed_chain_init_mem(chain, NULL, 0);
}

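/* Verify that the total number of elements fits the chain's counter type:
 * a u16 chain may hold up to U16_MAX + 1 elements, a u32 chain up to U32_MAX.
 */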
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
                             const struct qed_chain_init_params *params,
                             u32 page_cnt)
{
        u64 chain_size;

        chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
        chain_size *= page_cnt;

        if (!chain_size)
                return -EINVAL;

        /* The actual chain size can be larger than the maximal possible value
         * after rounding up the requested elements number to pages, and after
         * taking into account the unusable elements (next-ptr elements).
         * The size of a "u16" chain can be (U16_MAX + 1) since the chain
         * size/capacity fields are of u32 type.
         */
        switch (params->cnt_type) {
        case QED_CHAIN_CNT_TYPE_U16:
                if (chain_size > U16_MAX + 1)
                        break;

                return 0;
        case QED_CHAIN_CNT_TYPE_U32:
                if (chain_size > U32_MAX)
                        break;

                return 0;
        default:
                return -EINVAL;
        }

        DP_NOTICE(cdev,
                  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
                  chain_size);

        return -EINVAL;
}

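/* Allocate one coherent page per chain page and link them into a ring
 * through their embedded next-ptr elements.
 */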
static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
                                    struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        void *virt, *virt_prev = NULL;
        dma_addr_t phys;
        u32 i;

        for (i = 0; i < chain->page_cnt; i++) {
                virt = dma_alloc_coherent(dev, chain->page_size, &phys,
                                          GFP_KERNEL);
                if (!virt)
                        return -ENOMEM;

                if (i == 0) {
                        qed_chain_init_mem(chain, virt, phys);
                        qed_chain_reset(chain);
                } else {
                        qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
                                                     phys);
                }

                virt_prev = virt;
        }

        /* Last page's next element should point to the beginning of the
         * chain.
         */
        qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
                                     chain->p_phys_addr);

        return 0;
}

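/* Single mode uses one coherent page for the entire chain. */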
static int qed_chain_alloc_single(struct qed_dev *cdev,
                                  struct qed_chain *chain)
{
        dma_addr_t phys;
        void *virt;

        virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
                                  &phys, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        qed_chain_init_mem(chain, virt, phys);
        qed_chain_reset(chain);

        return 0;
}

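/* Allocate the per-page address table, the PBL table (unless an external
 * one is used) and the data pages, filling the PBL with each page's DMA
 * address as it is allocated.
 */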
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
        struct device *dev = &cdev->pdev->dev;
        struct addr_tbl_entry *addr_tbl;
        dma_addr_t phys, pbl_phys;
        __le64 *pbl_virt;
        u32 page_cnt, i;
        size_t size;
        void *virt;

        page_cnt = chain->page_cnt;

        size = array_size(page_cnt, sizeof(*addr_tbl));
        if (unlikely(size == SIZE_MAX))
                return -EOVERFLOW;

        addr_tbl = vzalloc(size);
        if (!addr_tbl)
                return -ENOMEM;

        chain->pbl.pp_addr_tbl = addr_tbl;

        if (chain->b_external_pbl) {
                pbl_virt = chain->pbl_sp.table_virt;
                goto alloc_pages;
        }

        size = array_size(page_cnt, sizeof(*pbl_virt));
        if (unlikely(size == SIZE_MAX))
                return -EOVERFLOW;

        pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
        if (!pbl_virt)
                return -ENOMEM;

        chain->pbl_sp.table_virt = pbl_virt;
        chain->pbl_sp.table_phys = pbl_phys;
        chain->pbl_sp.table_size = size;

alloc_pages:
        for (i = 0; i < page_cnt; i++) {
                virt = dma_alloc_coherent(dev, chain->page_size, &phys,
                                          GFP_KERNEL);
                if (!virt)
                        return -ENOMEM;

                if (i == 0) {
                        qed_chain_init_mem(chain, virt, phys);
                        qed_chain_reset(chain);
                }

                /* Fill the PBL table with the physical address of the page */
                pbl_virt[i] = cpu_to_le64(phys);

                /* Keep the virtual address of the page */
                addr_tbl[i].virt_addr = virt;
                addr_tbl[i].dma_map = phys;
        }

        return 0;
}

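/* Typical caller flow, as a rough sketch only. The field values below are
 * illustrative and not taken from this file; intended_use should also be set
 * to one of the QED_CHAIN_USE_* values from <linux/qed/qed_chain.h>:
 *
 *	struct qed_chain_init_params params = {
 *		.mode      = QED_CHAIN_MODE_PBL,
 *		.cnt_type  = QED_CHAIN_CNT_TYPE_U16,
 *		.num_elems = 128,
 *		.elem_size = 64,
 *	};
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, &chain, &params);
 *	if (rc)
 *		return rc;
 *
 *	... produce/consume elements ...
 *
 *	qed_chain_free(cdev, &chain);
 */
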
/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to be processed.
 * @params: Chain initialization parameters.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
                    struct qed_chain_init_params *params)
{
        u32 page_cnt;
        int rc;

        if (!params->page_size)
                params->page_size = QED_CHAIN_PAGE_SIZE;

        if (params->mode == QED_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
                page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
                                              params->elem_size,
                                              params->page_size,
                                              params->mode);

        rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
        if (rc) {
                DP_NOTICE(cdev,
                          "Cannot allocate a chain with the given arguments:\n");
                DP_NOTICE(cdev,
                          "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
                          params->intended_use, params->mode, params->cnt_type,
                          params->num_elems, params->elem_size,
                          params->page_size);
                return rc;
        }

        qed_chain_init(chain, params, page_cnt);

        switch (params->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                rc = qed_chain_alloc_next_ptr(cdev, chain);
                break;
        case QED_CHAIN_MODE_SINGLE:
                rc = qed_chain_alloc_single(cdev, chain);
                break;
        case QED_CHAIN_MODE_PBL:
                rc = qed_chain_alloc_pbl(cdev, chain);
                break;
        default:
                return -EINVAL;
        }

        if (!rc)
                return 0;

        qed_chain_free(cdev, chain);

        return rc;
}