18 #include <linux/types.h>
19 #include <linux/list.h>
/*
 * True while the external physical memory pool (ext_mem_pool) has been
 * initialized and its base ioremap()'d; gates pool alloc/free paths.
 * Zero-initialized (false) at load, like all file-scope statics.
 */
static bool ext_phys_mem_pool_enabled;
/*
 * Forward declaration: idr_for_each() callback that releases the node
 * resources held under IDR entry @id for process context @data.
 * (Definition appears later in this file.)
 */
static int drv_proc_free_node_res(int id, void *p, void *data);
88 (*node_res_obj)->node = hnode;
90 &(*node_res_obj)->
id);
93 pr_err(
"%s: OUT OF MEMORY\n", __func__);
99 &(*node_res_obj)->
id);
102 pr_err(
"%s: FAILED, IDR is FULL\n", __func__);
107 kfree(*node_res_obj);
114 static int drv_proc_free_node_res(
int id,
void *
p,
void *
data)
128 (node_res_obj->
node, &status);
150 pr_err(
"%s: proc_un_map failed!"
151 " status = 0x%xn", __func__, status);
160 pr_err(
"%s: proc_un_reserve_memory failed!"
161 " status = 0x%xn", __func__, status);
199 void *strm_res,
void *process_ctxt)
208 if (*pstrm_res ==
NULL) {
213 (*pstrm_res)->stream = stream_obj;
218 pr_err(
"%s: OUT OF MEMORY\n", __func__);
227 pr_err(
"%s: FAILED, IDR is FULL\n", __func__);
235 static int drv_proc_free_strm_res(
int id,
void *p,
void *process_ctxt)
258 strm_info.user_strm = &
user;
259 user.number_bufs_in_stream = 0;
261 while (
user.number_bufs_in_stream--)
263 (
u32 *) &ul_buf_size, &dw_arg);
308 INIT_LIST_HEAD(&pdrv_object->
dev_list);
319 pr_err(
"%s: Failed to store DRV object\n", __func__);
324 *drv_obj = pdrv_object;
350 pr_err(
"%s: Failed to store DRV object\n", __func__);
369 for (i = 0; i <
index; i++) {
391 u32 dw_dev_object = 0;
397 if (!list_empty(&pdrv_obj->
dev_list))
400 pr_err(
"%s: Failed to retrieve the object handle\n", __func__);
403 return dw_dev_object;
414 u32 dw_dev_extension = 0;
425 pr_err(
"%s: Failed to retrieve the object handle\n", __func__);
428 return dw_dev_extension;
440 u32 dw_next_dev_object = 0;
447 if (!list_empty(&pdrv_obj->
dev_list)) {
449 if (list_is_last(curr, &pdrv_obj->
dev_list))
451 dw_next_dev_object = (
u32) curr->
next;
454 pr_err(
"%s: Failed to retrieve the object handle\n", __func__);
457 return dw_next_dev_object;
470 u32 dw_dev_extension = 0;
478 curr = (
struct list_head *)dev_extension;
481 dw_dev_extension = (
u32) curr->
next;
484 pr_err(
"%s: Failed to retrieve the object handle\n", __func__);
487 return dw_dev_extension;
521 if ((
struct dev_object *)cur_elem == hdev_obj) {
569 dev_dbg(
bridge,
"%s: Failed to get Driver Object from Registry",
592 pszdev_node !=
NULL; pszdev_node = (
struct drv_ext *)
594 if ((
u32) pszdev_node == dw_context) {
656 if (host_res !=
NULL) {
657 request_bridge_resources(host_res);
690 if (shm_size >= 0x10000) {
698 pr_err(
"shm reservation Failed\n");
704 "dma_addr %x size %x\n", __func__,
720 *phost_resources = host_res;
731 pool_virt_base = (
u32)
ioremap(pool_phys_base, pool_size);
733 if ((
void **)pool_virt_base ==
NULL) {
734 pr_err(
"%s: external physical memory map failed\n", __func__);
735 ext_phys_mem_pool_enabled =
false;
737 ext_mem_pool.phys_mem_base = pool_phys_base;
738 ext_mem_pool.phys_mem_size = pool_size;
739 ext_mem_pool.virt_mem_base = pool_virt_base;
740 ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
741 ext_phys_mem_pool_enabled =
true;
747 if (ext_phys_mem_pool_enabled) {
748 iounmap((
void *)(ext_mem_pool.virt_mem_base));
749 ext_phys_mem_pool_enabled =
false;
768 if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
769 - ext_mem_pool.next_phys_alloc_ptr)) {
773 offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
775 new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
777 new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
779 if ((new_alloc_ptr + bytes) <=
780 (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
782 *phys_addr = new_alloc_ptr;
783 ext_mem_pool.next_phys_alloc_ptr =
784 new_alloc_ptr +
bytes;
786 ext_mem_pool.virt_mem_base + (new_alloc_ptr -
789 return (
void *)virt_addr;
803 u32 *physical_address)
809 if (ext_phys_mem_pool_enabled) {
810 va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
816 *physical_address = 0;
818 *physical_address = pa_mem;
831 if (!ext_phys_mem_pool_enabled)