/*
 * Device driver for SpursEngine (SP3 command control).
 *
 * (C) Copyright 2008 TOSHIBA CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#undef DEBUG

/*
 * NOTE: the angle-bracketed kernel header names were lost from the original
 * listing; the list below is reconstructed from the interfaces this file
 * uses (kmalloc/kzalloc, PCI, scatterlists, DMA mapping, get_user_pages,
 * byte swapping, bit operations, list/mutex/spinlock helpers, udelay).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/swab.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>

#include "spurs_define.h"
#include "spurs_ioctl.h"
#include "spurs_cmd.h"
#include "spurs_boot.h"
#include "spurs_timer.h"
#include "spurs_session.h"
#include "spurs_event.h"
#include "spurs_driver.h"

#undef PRINT_PACKET

static inline int spurs_get_cmd_packet(spd_cmd_t **cmd_pkt,
				       struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;
	int index;

	index = find_first_zero_bit(info->used_cmd_packet,
				    info->cmd_packet_num);
	if (index >= info->cmd_packet_num) {
		*cmd_pkt = NULL;
		return -1;
	}
	USE_CMD_PACKET(index, info);
	*cmd_pkt = &info->cmd_packet[index];

	return index;
}

static inline void spurs_release_cmd_packet(int idx, struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;

	if (!IS_CMD_PACKET_USED(idx, info)) {
		spd_err(dev, "received unknown cmd ack(%d)\n", idx);
		return;
	}
	FREE_CMD_PACKET(idx, info);
}

static struct spd_cmd_request *spurs_search_user_cmd_request(
	spd_cmd_t *res, struct spd_dev *dev)
{
	struct spd_session *session;
	struct spd_cmd_request *cmd_req;

	session = spurs_get_session(res->session_id, dev);
	if (session == NULL)
		return NULL;

	list_for_each_entry(cmd_req, &session->exec_cmd_queue.queue, queue) {
		if (cmd_req->cmd_packet.request_id == res->request_id) {
			cmd_req->state = SPD_CMD_STATE_COMPLETED;
			spurs_put_request_id(
				cmd_req->cmd_packet.request_id,
				&session->rid);
			if (session->event_flag != 0)
				cmd_req->event_flag = SPD_EVENT_RECEIVED;
			return cmd_req;
		}
	}
	return NULL;
}

static struct spd_cmd_request *spurs_search_system_cmd_request(
	spd_cmd_t *res, struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;
	struct spd_cmd_request *cmd_req;

	list_for_each_entry(cmd_req, &info->sys_exec_queue.queue, queue) {
		if (cmd_req->cmd_packet.request_id == res->request_id) {
			cmd_req->state = SPD_CMD_STATE_COMPLETED;
			spurs_put_request_id(cmd_req->cmd_packet.request_id,
					     &dev->cmd_info.sys_rid);
			return cmd_req;
		}
	}
	return NULL;
}

struct spd_scatter_list_buffer *spurs_get_scatter_list_buffer(
	struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;
	int index;

	index = find_first_zero_bit(info->used_slb, info->slb_num);
	if (index < info->slb_num) {
		spd_dbg(dev, "Get scatter list buffer : %p\n",
			info->slb[index].head);
		USE_SLB(index, info);
		return &info->slb[index];
	} else {
		spd_dbg(dev, "All scatter list buffers are reserved.\n");
		info->dma_state = SPD_DMA_STOP;
		return NULL;
	}
}

void spurs_release_scatter_list_buffer(struct spd_scatter_list_buffer *slb,
				       struct spd_dev *dev)
{
	spd_dbg(dev, "Release scatter list buffer : %p\n", slb->head);
	FREE_SLB(slb - dev->cmd_info.slb, &dev->cmd_info);
	dev->cmd_info.dma_state = SPD_DMA_START;
}
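/*
 * Illustrative sketch (excluded from the build): how the bitmap-backed slot
 * helpers above are typically paired.  The caller below is hypothetical and
 * only shows the expected reserve/use/release discipline; it is not part of
 * the driver API.
 */
#if 0
static int example_send_one_packet(struct spd_dev *dev)
{
	spd_cmd_t *pkt;
	int index;

	/* Reserve a free command packet slot (bitmap scan). */
	index = spurs_get_cmd_packet(&pkt, dev);
	if (index < 0)
		return -EBUSY;		/* all slots are in use */

	/* ... fill *pkt and ring the doorbell here ... */

	/* Return the slot once the device has acknowledged it. */
	spurs_release_cmd_packet(index, dev);
	return 0;
}
#endif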
#define CONVERT_HOST_TO_SPURS	0
#define CONVERT_SPURS_TO_HOST	1

#ifndef __BIG_ENDIAN
static inline void convert_packet_endian(spd_cmd_t *pkt, int dir)
{
	uint16_t cmd_type, cmd_op;

	if (dir == CONVERT_HOST_TO_SPURS) {
		cmd_type = pkt->cmd_type;
		cmd_op = pkt->op;
	} else {
		cmd_type = swab16(pkt->cmd_type);
		cmd_op = swab16(pkt->op);
	}

	pkt->cmd_type = swab16(pkt->cmd_type);
	pkt->session_id = swab16(pkt->session_id);
	pkt->request_id = swab16(pkt->request_id);
	pkt->op = swab16(pkt->op);
	pkt->status = swab16((uint16_t)pkt->status);
	pkt->data_size = swab16(pkt->data_size);
	pkt->sl_cache_ctrl = swab16(pkt->sl_cache_ctrl);
	pkt->sl_length = swab16(pkt->sl_length);

	if ((cmd_type == SPD_CMD_TYPE_SYSTEM) &&
	    ((cmd_op & 0xFFF0) == 0xFFF0))
		return;	/* operation is a debug command. */

	pkt->buffer_uva = swab64(pkt->buffer_uva);
	pkt->sl_uva_base = swab64(pkt->sl_uva_base);
	pkt->buffer_size = swab32(pkt->buffer_size);
	pkt->sp3d_local = swab16(pkt->sp3d_local);
	pkt->spd_status = swab16(pkt->spd_status);
}
#else
/* Packets are already in device (big-endian) byte order on such hosts. */
#define convert_packet_endian(pkt, dir) do { } while (0)
#endif /* __BIG_ENDIAN */

#if (defined DEBUG) && (defined PRINT_PACKET)
static void print_packet_data(char *data, uint16_t size)
{
	int i;

	__spd_dbg("data : ");
	for (i = 0; i < size; i++) {
		if ((i % 16) == 0)
			printk("\n%04x : ", i);
		printk("%02x ", data[i]);
	}
	printk("\n");
}

static void print_cmd_packet(spd_cmd_t *pkt)
{
	int i;

	__spd_dbg("***** Packet Information *****\n");
	__spd_dbg("cmd_type : %d\n", pkt->cmd_type);
	__spd_dbg("session_id : %d\n", pkt->session_id);
	__spd_dbg("request_id : %d\n", pkt->request_id);
	__spd_dbg("operation : %d\n", pkt->op);
	__spd_dbg("status : %d\n", pkt->status);
	__spd_dbg("len : %d\n", pkt->data_size);
	if ((pkt->cmd_type == SPD_CMD_TYPE_SYSTEM) &&
	    ((pkt->op & 0xFFF0) == 0xFFF0)) {
		__spd_dbg("trigger_code : 0x%x\n", pkt->sl_cache_ctrl);
		__spd_dbg("trigger_mask : 0x%x\n", pkt->sl_length);
		print_packet_data((char *)&pkt->buffer_uva, pkt->data_size);
	} else {
		__spd_dbg("sl_cache_ctl : %04x\n", pkt->sl_cache_ctrl);
		__spd_dbg("sl_length : %d\n", pkt->sl_length);
		__spd_dbg("buffer_uva : %016lx\n", pkt->buffer_uva);
		__spd_dbg("sl_uva_base : %016lx\n", pkt->sl_uva_base);
		__spd_dbg("buffer_size : %x\n", pkt->buffer_size);
		__spd_dbg("spd_status : %d\n", pkt->spd_status);
		for (i = 0; i < 8; i++)
			__spd_dbg("sdata[%d] : %08x\n", i, pkt->sdata[i]);
		print_packet_data(pkt->data, pkt->data_size);
	}
	__spd_dbg("*******************************\n");
}
#else
#define print_packet_data(data, size)
#define print_cmd_packet(pkt)
#endif /* DEBUG */
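/*
 * Minimal sketch (excluded from the build) of the byte-order convention used
 * by convert_packet_endian() above: on a little-endian host every 16/32/64-bit
 * header field is byte-swapped before the packet is handed to the big-endian
 * SpursEngine and swapped back when a response arrives.  The field values
 * below are illustrative only.
 */
#if 0
static void example_endian_round_trip(void)
{
	spd_cmd_t pkt = { .session_id = 0x1234, .request_id = 0x0002 };

	convert_packet_endian(&pkt, CONVERT_HOST_TO_SPURS);
	/* pkt.session_id now reads 0x3412 on the host, i.e. 0x1234 as seen
	 * by the big-endian device. */
	convert_packet_endian(&pkt, CONVERT_SPURS_TO_HOST);
	/* pkt.session_id is back to 0x1234: swab16() is its own inverse. */
}
#endif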
int spurs_dma_map_sg(unsigned long user_buff_addr, uint32_t user_buff_size,
		     uint32_t dma_direction, struct spd_dev *dev,
		     struct scatterlist **ret_sgl)
{
	struct page **host_page = NULL;
	struct scatterlist *sgl = NULL;
	int nr_pages, mapped_pages;
	unsigned long start, end, off, len;
	struct device *sdev = &dev->pci_dev->dev;
	enum dma_data_direction dir;
	int i, size, sg_num, ret;

	/* Get page list of host buffer */
	start = user_buff_addr >> PAGE_SHIFT;
	end = (user_buff_addr + user_buff_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nr_pages = end - start;
	if (nr_pages == 0)
		return -EINVAL;

	/* Alloc list of page pointers */
	host_page = kmalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
	if (host_page == NULL)
		return -ENOMEM;

	/* Alloc scatter list */
	sgl = kmalloc(sizeof(struct scatterlist) * nr_pages, GFP_KERNEL);
	if (sgl == NULL) {
		ret = -ENOMEM;
		goto end_free_page;
	}

	/* Initialize scatter list (this is necessary after kernel 2.6.24) */
	/* Added by T.Nagai 2009/11/03 */
	sg_init_table(sgl, nr_pages);

	/* Get pages for user memory */
	down_read(&current->mm->mmap_sem);
	mapped_pages = get_user_pages(current, current->mm, user_buff_addr,
				      nr_pages,
				      (dma_direction == SPD_DMA_DIR_FROM_SPURS),
				      0, host_page, NULL);
	up_read(&current->mm->mmap_sem);
	if (mapped_pages < nr_pages)
		goto end_unmap_page;

	/* Setup scatter-list */
	size = user_buff_size;
	off = user_buff_addr & ~PAGE_MASK;
	if (mapped_pages > 1) {
		len = PAGE_SIZE - off;
		size -= len;
	} else
		len = size;
	sg_set_page(&sgl[0], host_page[0], len, off);
	for (i = 1; i < nr_pages; i++) {
		len = (size < PAGE_SIZE) ? size : PAGE_SIZE;
		sg_set_page(&sgl[i], host_page[i], len, 0);
		size -= len;
	}

	/* Map scatter-list to DMA address */
	dir = (dma_direction == SPD_DMA_DIR_FROM_SPURS) ?
		DMA_FROM_DEVICE : DMA_TO_DEVICE;
	sg_num = dma_map_sg(sdev, sgl, nr_pages, dir);

	*ret_sgl = sgl;
	kfree(host_page);
	return sg_num;

end_unmap_page:
	for (i = 0; i < mapped_pages; i++) {
		page_cache_release(host_page[i]);
	}
	ret = -EACCES;
end_free_page:
	kfree(sgl);
	kfree(host_page);
	return ret;
}

void spurs_dma_unmap_sg(struct spd_cmd_request *req, struct spd_dev *dev)
{
	struct page *page;
	enum dma_data_direction dir;
	int i;

	if (req->sgl == NULL)
		return;

	dir = (req->dma_direction == SPD_DMA_DIR_FROM_SPURS) ?
		DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_unmap_sg(&dev->pci_dev->dev, req->sgl, req->sg_num, dir);

	for (i = 0; i < req->sg_num; i++) {
		page = sg_page(&req->sgl[i]);
		if (req->dma_direction == SPD_DMA_DIR_FROM_SPURS)
			SetPageDirty(page);
		page_cache_release(page);
	}
	kfree(req->sgl);
	req->sgl = NULL;
}
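/*
 * Illustrative sketch (excluded from the build): the usual life cycle of the
 * scatter-gather helpers above.  spurs_dma_map_sg() pins the user pages and
 * maps them for DMA; spurs_dma_unmap_sg() reverses both steps once the
 * request carrying the list has completed.  The caller and its request setup
 * are hypothetical and heavily simplified.
 */
#if 0
static int example_map_then_unmap(struct spd_cmd_request *req,
				  unsigned long uaddr, uint32_t size,
				  uint32_t dir, struct spd_dev *dev)
{
	struct scatterlist *sgl;
	int sg_num;

	/* Pin the user pages and build a DMA-mapped scatterlist. */
	sg_num = spurs_dma_map_sg(uaddr, size, dir, dev, &sgl);
	if (sg_num < 0)
		return sg_num;

	req->sgl = sgl;
	req->sg_num = sg_num;
	req->dma_direction = dir;

	/* ... attach a scatter list buffer and run the transfer ... */

	/* Unmap, mark pages dirty (device-to-host case) and release them. */
	spurs_dma_unmap_sg(req, dev);
	return 0;
}
#endif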
static void spurs_setup_dma(struct spd_cmd_request *req, struct spd_dev *dev)
{
	spd_cmd_t *cmd_pkt = &req->cmd_packet;
	uint64_t *slb_entry, dma_addr;
	uint32_t size;
	int i;

	BUG_ON(req->slb == NULL);

	/* Setup scatter list tag */
	slb_entry = req->slb->head;
	size = 0;
	for (i = 0; i < req->sg_num; i++) {
		dma_addr = DMA_ALIGNED_ADDR(
			(uint64_t)sg_dma_address(&req->sgl[i]),
			SPD_DMA_PAGE_ALIGN_MASK);
		spd_dbg(dev, "dma address[%d] = 0x%016llx\n", i, dma_addr);
		*slb_entry = cpu_to_be64(dma_addr);
		size += sg_dma_len(&req->sgl[i]);
		slb_entry++;
	}

	/* Setup command packet */
	cmd_pkt->sl_cache_ctrl = req->slb->index |
		SPD_SLC_TAG_UPDATE_MASK | SPD_SLC_TAG_REFERENCE_MASK;
	cmd_pkt->sl_cache_ctrl |=
		(req->dma_direction == SPD_DMA_DIR_FROM_SPURS) ?
			0 : SPD_SLC_TAG_DIRECTION_MASK;
	cmd_pkt->sl_length = req->sg_num;
	cmd_pkt->buffer_uva = sg_dma_address(&req->sgl[0]);
	cmd_pkt->buffer_size = DMA_ALIGNED_SIZE(size, SPD_DMA_SIZE_ALIGN_MASK);
	cmd_pkt->sl_uva_base = DMA_ALIGNED_ADDR(cmd_pkt->buffer_uva,
						SPD_DMA_PAGE_ALIGN_MASK);
}

static int spurs_check_dma_param(unsigned long buff_addr, uint32_t buff_size,
				 uint32_t dir, struct spd_dev *dev)
{
	uint32_t trans_size;

	spd_dbg(dev, "addr=0x%lx, size = %d\n", buff_addr, buff_size);

	if (dir == SPD_DMA_DIR_FROM_SPURS) {
		/* Transfer from spurs to host */
		if (!spd_write_ok((void __user *)buff_addr, buff_size)) {
			spd_dbg(dev, "User buffer is invalid.\n");
			return -EACCES;
		}
	} else {
		/* Transfer from host to spurs */
		if (!spd_read_ok((void __user *)buff_addr, buff_size)) {
			spd_dbg(dev, "User buffer is invalid.\n");
			return -EACCES;
		}
	}

	if ((buff_addr & SPD_DMA_ADDR_ALIGN_MASK) != 0) {
		spd_dbg(dev, "User buffer is not aligned on a 128B boundary.\n");
		return -EINVAL;
	}

	trans_size = get_max_transfer_size(dev);
	if ((buff_size == 0) || (buff_size > trans_size) ||
	    (buff_size & SPD_DMA_SIZE_ALIGN_MASK)) {
		spd_dbg(dev, "Transfer size must not exceed the maximum "
			"transfer size and must be 128B-aligned.\n");
		return -EINVAL;
	}
	return 0;
}

static int spurs_start_quota_queue(struct spd_session *session)
{
	struct spd_dev *dev = session->dev;
	struct spd_command_info *info = &dev->cmd_info;
	struct spd_cmd_request *cmd_req;
	int s_count, d_count;
	int ret = -EBUSY;

	spd_dbg(dev, "SID=%d\n", GET_SID(session));

	if (session->quota_queue.state != SPD_QUEUE_STATE_START) {
		spd_dbg(dev, "Quota queue is not started.\n");
		return -EPERM;
	}

	/* Check transaction count */
	s_count = atomic_read(&session->trans_count);
	d_count = atomic_read(&info->trans_count);
	spd_dbg(dev, "session count=%d, device count=%d\n", s_count, d_count);
	if ((s_count < dev->sys_config.max_count) &&
	    (d_count < dev->sys_config.total_count)) {
		cmd_req = spurs_dequeue_command(&session->quota_queue);
		if (cmd_req) {
			spd_dbg(dev, "Go to next queue(SID=%d, TID=%d)\n",
				GET_SID(session),
				cmd_req->ioctl_req->tag_id);
			ret = 0;
			cmd_req->state = SPD_CMD_STATE_QUEUING;
			if (IS_CMD_TYPE_DMA(cmd_req))
				spurs_queue_command(
					&session->dma_cmd_queue, cmd_req);
			else
				spurs_queue_command(
					&session->user_cmd_queue, cmd_req);
			atomic_inc(&session->trans_count);
			atomic_inc(&info->trans_count);
		} else {
			spd_dbg(dev, "Quota queue is empty.\n");
			ret = -ENOENT;
		}
	}
	return ret;
}

static int spurs_start_user_cmd_queue(struct spd_cmd_request *cmd_req,
				      struct spd_session *session)
{
	struct spd_dev *dev = session->dev;
	spd_cmd_t *cmd_packet = NULL;
	int index, rid;

	spd_dbg(dev, "Start user cmd queue[SID=%d, TID=%d]\n",
		GET_SID(session), cmd_req->ioctl_req->tag_id);

	if (!IS_CMD_STATE_QUEUING(cmd_req)) {
		spd_dbg(dev, "State of request is not queuing.\n");
		return -EPERM;
	}

	/* Get command packet */
	index = spurs_get_cmd_packet(&cmd_packet, dev);
	if (index < 0) {
		spd_dbg(dev, "Can not reserve command packet.\n");
		return -EBUSY;
	}

	/* Get Request ID */
	rid = spurs_get_request_id(&session->rid);
	if (rid < 0) {
		spd_err(dev, "Can not allocate request ID.\n");
		spurs_release_cmd_packet(index, dev);
		return -EBUSY;
	}

	/* Setup command packet */
	cmd_req->cmd_packet.request_id = (uint16_t)rid;
#ifdef DEBUG
	print_cmd_packet(&cmd_req->cmd_packet);
#endif
	memcpy(cmd_packet, &cmd_req->cmd_packet, sizeof(spd_cmd_t));
	convert_packet_endian(cmd_packet, CONVERT_HOST_TO_SPURS);
	cmd_req->state = SPD_CMD_STATE_EXECUTING;
	spurs_remove_cmd_request(cmd_req);
	spurs_queue_command(&session->exec_cmd_queue, cmd_req);

	return index;
}
static int spurs_start_dma_cmd_queue(struct spd_session *session)
{
	struct spd_dev *dev = session->dev;
	struct spd_cmd_request *cmd_req;
	struct spd_scatter_list_buffer *slb = NULL;

	spd_dbg(dev, "Start DMA cmd queue[SID=%d]\n", GET_SID(session));

	if (spurs_cmd_queue_empty(&session->dma_cmd_queue)) {
		spd_dbg(dev, "DMA cmd queue is empty.\n");
		return -ENOENT;
	}

	slb = spurs_get_scatter_list_buffer(dev);
	if (slb == NULL)
		return -ENOMEM;

	cmd_req = spurs_dequeue_command(&session->dma_cmd_queue);
	if (cmd_req == NULL) {
		spurs_release_scatter_list_buffer(slb, dev);
		return -ENOENT;
	}
	cmd_req->slb = slb;
	spurs_setup_dma(cmd_req, dev);

	/* Add request to user command queue */
	spd_dbg(dev, "Go to user cmd queue[SID=%d, TID=%d]\n",
		GET_SID(session), cmd_req->ioctl_req->tag_id);
	spurs_queue_command(&session->user_cmd_queue, cmd_req);

	return 0;
}

void spurs_dispatch_quota_queue(void *data)
{
	struct spd_session *session = (struct spd_session *)data;
	int ret;

	spd_dbg(session->dev, "Dispatch quota queue[SID=%d]\n",
		GET_SID(session));

	if (!IS_SESSION_READY(session)) {
		spd_err(session->dev, "session is not ready.\n");
		return;
	}

	for (;;) {
		ret = spurs_start_quota_queue(session);
		if (ret < 0)
			break;
	}
}

void spurs_dispatch_user_cmd_queue(void *data)
{
	struct spd_session *session = (struct spd_session *)data;
	struct spd_dev *dev = session->dev;
	struct spd_cmd_request *cmd_req, *next;
	uint32_t cmd_flag;
	int index;

	spd_dbg(session->dev, "Dispatch user cmd queue[SID=%d]\n",
		GET_SID(session));

	if (!IS_SESSION_READY(session)) {
		spd_err(dev, "session is not ready.\n");
		return;
	}

	if (dev->cmd_info.request_flag != 0) {
		spd_dbg(dev, "sending command.\n");
		return;
	}

	cmd_flag = 0;
	list_for_each_entry_safe(cmd_req, next,
				 &session->user_cmd_queue.queue, queue) {
		if (!IS_CMD_STATE_QUEUING(cmd_req))
			continue;
		index = spurs_start_user_cmd_queue(cmd_req, session);
		if (index < 0)
			break;
		dev->cmd_info.request_flag |= 1 << index;
		cmd_flag |= SPD_SEND_CMD_PKT_SIZE_1024 <<
			(index * SPD_SEND_CMD_PKT_SHIFT);
	}

	if (cmd_flag != 0) {
		spd_dbg(dev, "start SP3 command :cmd=0x%08x.\n", cmd_flag);
		spurs_set_rxmbox(SPD_MBOX0, cmd_flag, dev);
	}
}

void spurs_dispatch_dma_cmd_queue(void *data)
{
	struct spd_session *session = (struct spd_session *)data;
	struct spd_dev *dev = session->dev;
	int ret;

	spd_dbg(session->dev, "Dispatch DMA cmd queue[SID=%d]\n",
		GET_SID(session));

	if (!IS_SESSION_READY(session)) {
		spd_err(dev, "Session is not ready.\n");
		return;
	}

	if (dev->cmd_info.dma_state == SPD_DMA_STOP) {
		spd_dbg(dev, "DMA is not available.\n");
		return;
	}

	for (;;) {
		ret = spurs_start_dma_cmd_queue(session);
		if ((ret != 0) || (dev->cmd_info.dma_state == SPD_DMA_STOP))
			break;
	}
}
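/*
 * Illustrative sketch (excluded from the build) of how the dispatch
 * functions above encode the mailbox doorbell: every command packet slot
 * that was reserved contributes one bit in request_flag and one size field
 * in the MBOX0 word.  The macros are the driver's own (spurs_define.h); the
 * loop is a simplified restatement, not additional driver logic.
 */
#if 0
static uint32_t example_build_doorbell(unsigned long reserved_slots,
				       int nr_slots)
{
	uint32_t cmd_flag = 0;
	int index;

	for (index = 0; index < nr_slots; index++)
		if (reserved_slots & (1UL << index))
			cmd_flag |= SPD_SEND_CMD_PKT_SIZE_1024 <<
				(index * SPD_SEND_CMD_PKT_SHIFT);

	return cmd_flag;	/* written to SPD_MBOX0 when non-zero */
}
#endif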
static int spurs_start_system_cmd_queue(struct spd_cmd_request *cmd_req,
					struct spd_dev *dev)
{
	spd_cmd_t *cmd_packet = NULL;
	int index, rid;

	spd_dbg(dev, "Start system cmd queue\n");

	if (!IS_CMD_STATE_QUEUING(cmd_req)) {
		spd_dbg(dev, "Request state is invalid.\n");
		return -EPERM;
	}

	/* Get command packet */
	index = spurs_get_cmd_packet(&cmd_packet, dev);
	if (index < 0) {
		spd_dbg(dev, "Can not reserve command packet.\n");
		return -EBUSY;
	}

	/* Get Request ID */
	rid = spurs_get_request_id(&dev->cmd_info.sys_rid);
	if (rid < 0) {
		spd_err(dev, "Can not allocate request ID\n");
		spurs_release_cmd_packet(index, dev);
		return -EBUSY;
	}

	/* Setup command packet */
	cmd_req->cmd_packet.request_id = (uint16_t)rid;
#ifdef DEBUG
	print_cmd_packet(&cmd_req->cmd_packet);
#endif
	memcpy(cmd_packet, &cmd_req->cmd_packet, sizeof(spd_cmd_t));
	convert_packet_endian(cmd_packet, CONVERT_HOST_TO_SPURS);
	cmd_req->state = SPD_CMD_STATE_EXECUTING;
	spurs_remove_cmd_request(cmd_req);
	spurs_queue_command(&dev->cmd_info.sys_exec_queue, cmd_req);

	return index;
}

void spurs_dispatch_system_cmd_queue(void *data)
{
	struct spd_dev *dev = (struct spd_dev *)data;
	struct spd_command_info *info = &dev->cmd_info;
	struct spd_cmd_request *cmd_req, *next;
	uint32_t cmd_flag;
	int index;

	spd_dbg(dev, "Dispatch system cmd queue\n");

	if (dev->cmd_info.request_flag != 0) {
		spd_dbg(dev, "sending command.\n");
		return;
	}

	cmd_flag = 0;
	list_for_each_entry_safe(cmd_req, next,
				 &info->sys_cmd_queue.queue, queue) {
		if (!IS_CMD_STATE_QUEUING(cmd_req))
			continue;
		index = spurs_start_system_cmd_queue(cmd_req, dev);
		if (index < 0)
			break;
		info->request_flag |= 1 << index;
		cmd_flag |= SPD_SEND_CMD_PKT_SIZE_1024 <<
			(index * SPD_SEND_CMD_PKT_SHIFT);
	}

	if (cmd_flag != 0) {
		spd_dbg(dev, "Start system command : cmd=0x%08x\n", cmd_flag);
		spurs_set_rxmbox(SPD_MBOX0, cmd_flag, dev);
	}
}

int spurs_setup_user_command(unsigned int type, spd_ioctl_request_cmd *cmd,
			     unsigned long user_buff_addr,
			     uint32_t user_buff_size, uint32_t dma_dir,
			     struct spd_cmd_request *cmd_req,
			     struct spd_session *session)
{
	struct spd_dev *dev = session->dev;
	int sg_num, ret;
	struct scatterlist *sgl = NULL;

	cmd_req->state = SPD_CMD_STATE_SETUP;
	cmd_req->cmd_type = type;
	cmd_req->cmd_packet.session_id = GET_SID(session);
	cmd_req->cmd_packet.op = cmd->op;
	cmd_req->cmd_packet.cmd_type = SPD_CMD_TYPE_USER;
	memcpy(cmd_req->cmd_packet.sdata, cmd->sdata, SPD_COMMAND_SDATA_SIZE);
	cmd_req->cmd_packet.data_size = cmd->len;
	if (cmd->len > 0)
		memcpy(cmd_req->cmd_packet.data, cmd->data, cmd->len);

	if (IS_CMD_TYPE_DMA(cmd_req)) {
		ret = spurs_check_dma_param(
			user_buff_addr, user_buff_size, dma_dir, dev);
		if (ret)
			return ret;
		cmd_req->dma_address = user_buff_addr;
		cmd_req->dma_size = user_buff_size;
		cmd_req->dma_direction = dma_dir;
		sg_num = spurs_dma_map_sg(user_buff_addr, user_buff_size,
					  dma_dir, dev, &sgl);
		if (sg_num < 0)
			return sg_num;
		cmd_req->sgl = sgl;
		cmd_req->sg_num = sg_num;
	}
	return 0;
}
void spurs_schedule_cmd_queue_after_command_ack(struct spd_dev *dev)
{
	struct spd_session_info *session_info = &dev->session_info;
	struct spd_cmd_request *req, *next;
	struct spd_session *curr, *last, *mark;
	uint32_t cmd_flag, mbox0;
	int index;

	if (dev->cmd_info.request_flag != 0) {
		spd_dbg(dev, "sending command.\n");
		return;
	}

	spurs_get_rxmbox(SPD_MBOX0, &mbox0, dev);
	if (mbox0 != 0) {
		spd_err(dev, "mbox0 is %x.\n", mbox0);
		return;
	}

	cmd_flag = 0;

	/* System command scheduling */
	list_for_each_entry_safe(req, next,
				 &dev->cmd_info.sys_cmd_queue.queue, queue) {
		if (!IS_CMD_STATE_QUEUING(req))
			continue;
		index = spurs_start_system_cmd_queue(req, dev);
		if (index < 0)
			break;
		dev->cmd_info.request_flag |= 1 << index;
		cmd_flag |= SPD_SEND_CMD_PKT_SIZE_1024 <<
			(index * SPD_SEND_CMD_PKT_SHIFT);
	}

	/* User command scheduling */
	if (session_info->curr_ack == NULL) {
		spd_dbg(dev, "There is no scheduling session.\n");
		goto end;
	}
	curr = session_info->curr_ack;
	last = spurs_get_prev_session(curr, session_info);
	mark = NULL;
	for (;;) {
		spd_dbg(dev, "Schedule session is %d\n", GET_SID(curr));
		if (!IS_SESSION_READY(curr)) {
			spd_dbg(dev, "Session[%d] is not ready.\n",
				GET_SID(curr));
			goto next_session;
		}
		list_for_each_entry_safe(req, next,
					 &curr->user_cmd_queue.queue, queue) {
			index = spurs_start_user_cmd_queue(req, curr);
			if (index >= 0) {
				mark = curr;
				dev->cmd_info.request_flag |= 1 << index;
				cmd_flag |= SPD_SEND_CMD_PKT_SIZE_1024 <<
					(index * SPD_SEND_CMD_PKT_SHIFT);
			} else
				break;
		}
next_session:
		if (curr == last) {
			if (mark == NULL)
				break;
			else {
				last = mark;
				mark = NULL;
				spd_dbg(dev, "Update last session=%d\n",
					GET_SID(last));
			}
		}
		curr = spurs_get_next_session(curr, session_info);
		session_info->curr_ack = curr;
	}
end:
	if (cmd_flag != 0) {
		spd_dbg(dev, "start SP3 command :cmd=0x%08x\n", cmd_flag);
		spurs_set_rxmbox(SPD_MBOX0, cmd_flag, dev);
	}
}

void spurs_receive_command_ack(uint32_t data, struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;
	int i;

	spd_spin_lock(&dev->spinlock);
	spd_dbg(dev, "Command ack : request=0x%08x, data=0x%08x\n",
		info->request_flag, data);
	info->request_flag &= ~data;
	for (i = 0; i < info->cmd_packet_num; i++)
		if (data & (1 << i))
			spurs_release_cmd_packet(i, dev);

	/* Schedule command */
	spurs_schedule_cmd_queue_after_command_ack(dev);
	spd_spin_unlock(&dev->spinlock);
}

void spurs_complete_cmd_request(int error, struct spd_cmd_request *req,
				struct spd_dev *dev)
{
	if (req == NULL)
		return;

	if (req->sgl != NULL)
		spurs_dma_unmap_sg(req, dev);
	if (req->slb != NULL)
		spurs_release_scatter_list_buffer(req->slb, dev);

	if (IS_CMD_TYPE_USER(req))
		if (IS_CMD_STATE_QUEUING(req) ||
		    IS_CMD_STATE_EXECUTING(req) ||
		    IS_CMD_STATE_COMPLETED(req))
			spurs_decrement_transaction_count(
				req->cmd_packet.session_id, dev);

	spurs_remove_cmd_request(req);
	spurs_complete_ioctl_request(req->ioctl_req, error);
}

void spurs_schedule_cmd_queue_after_response(struct spd_dev *dev)
{
	struct spd_command_info *cmd_info = &dev->cmd_info;
	struct spd_session_info *session_info = &dev->session_info;
	struct spd_session *curr, *last, *mark;
	int ret = 1;

	if (session_info->curr_res == NULL) {
		spd_dbg(dev, "There is no scheduling session.\n");
		return;
	}
	curr = session_info->curr_res;
	last = spurs_get_prev_session(session_info->curr_res, session_info);
	mark = NULL;
	for (;;) {
		spd_dbg(dev, "schedule command[SID=%d].\n", GET_SID(curr));
		if (!IS_SESSION_READY(curr)) {
			spd_dbg(dev, "Session is not ready.\n");
			goto next_session;
		}

		/* DMA cmd queue schedule */
		ret = spurs_start_dma_cmd_queue(curr);
		if (ret == 0) {
			if (cmd_info->dma_state == SPD_DMA_STOP)
				break;
			goto next_session;
		}

		/* Quota queue schedule */
		ret = 1;
		if (spurs_start_quota_queue(curr) == 0)
			ret = 0;
next_session:
		if (ret == 0)
			mark = curr;
		if (curr == last) {
			if (mark == NULL)
				break;
			else {
				last = mark;
				mark = NULL;
			}
		}
		curr = spurs_get_next_session(curr, session_info);
		session_info->curr_res = curr;
	}
}

void spurs_decrement_transaction_count(uint16_t sid, struct spd_dev *dev)
{
	struct spd_session *session;

	session = spurs_get_session(sid, dev);
	if (session == NULL)
		return;

	BUG_ON((atomic_read(&session->trans_count) == 0) ||
	       (atomic_read(&dev->cmd_info.trans_count) == 0));
	atomic_dec(&session->trans_count);
	atomic_dec(&dev->cmd_info.trans_count);
}
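/*
 * Illustrative sketch (excluded from the build) of the round-robin walk used
 * by the two schedulers above.  "curr" starts at the session to be served
 * next, "last" marks where a pass would normally stop, and "mark" remembers
 * the most recent session that still made progress, so the walk keeps going
 * around until one full pass yields nothing.  The session iterators are the
 * driver's own; example_try_to_schedule() is hypothetical.
 */
#if 0
static void example_round_robin(struct spd_session *curr,
				struct spd_session_info *info)
{
	struct spd_session *last = spurs_get_prev_session(curr, info);
	struct spd_session *mark = NULL;

	for (;;) {
		if (example_try_to_schedule(curr))	/* hypothetical helper */
			mark = curr;
		if (curr == last) {
			if (mark == NULL)
				break;		/* a full pass made no progress */
			last = mark;		/* extend the walk past the old end */
			mark = NULL;
		}
		curr = spurs_get_next_session(curr, info);
	}
}
#endif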
void spurs_receive_response(uint32_t data, struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;
	struct spd_cmd_request *req;
	spd_cmd_t *res;
	uint32_t start, end, idx;

	if ((data & SPD_RES_PKT_VALID_MASK) == 0) {
		spd_err(dev, "Packet index data is invalid.\n");
		return;
	}

	spd_spin_lock(&dev->spinlock);
	start = data & SPD_RES_PKT_INDEX_MASK;
	end = (data >> SPD_RES_PKT_END_SHIFT) & SPD_RES_PKT_INDEX_MASK;
	spd_dbg(dev, "start=%d, end=%d\n", start, end);

	/* Check whether receiving response packets completed. */
	idx = start;
	for (;;) {
		if (IS_CMD_TYPE_INVALID(&info->res_packet[idx])) {
			spd_dbg(dev,
				"Response packet has not been transferred.\n");
			data |= SPD_RES_PKT_RETRY_FLAG;
			spurs_set_rxmbox(SPD_MBOX1, data, dev);
			spd_spin_unlock(&dev->spinlock);
			return;
		}
		if (idx == end)
			break;
		idx++;
		if (idx >= info->res_packet_num)
			idx = 0;
	}

	idx = start;
	for (;;) {
		/* Copy packet */
		res = &info->res_packet[idx];
		convert_packet_endian(res, CONVERT_SPURS_TO_HOST);
#ifdef DEBUG
		print_cmd_packet(res);
#endif /* DEBUG */

		/* Copy packet to spd_request */
		if (res->cmd_type == SPD_CMD_TYPE_USER)
			req = spurs_search_user_cmd_request(res, dev);
		else
			req = spurs_search_system_cmd_request(res, dev);
		if (req != NULL) {
			if (IS_IOCTL_REQUEST_RUNNING(req->ioctl_req)) {
				memcpy(&req->res_packet, res,
				       sizeof(spd_cmd_t));
				spurs_complete_cmd_request(0, req, dev);
			} else if (req->ioctl_req->error == ECANCELED)
				spd_dbg(dev, "Request is already canceled"
					"(sid=%d, rid=%d)\n",
					req->cmd_packet.session_id,
					req->cmd_packet.request_id);
			else
				spd_dbg(dev, "Request is in an invalid state"
					"(sid=%d, rid=%d)\n",
					req->cmd_packet.session_id,
					req->cmd_packet.request_id);
		} else
			spd_dbg(dev, "No request is found(sid=%d, rid=%d)\n",
				res->session_id, res->request_id);

		/* Clear packet */
		memset(&info->res_packet[idx], 0, sizeof(spd_cmd_t));
		info->res_packet[idx].cmd_type = SPD_CMD_TYPE_DUMMY;

		if (idx == end)
			break;	/* End search of packets */
		idx++;
		if (idx >= info->res_packet_num)
			idx = 0;
	}

	udelay(20);

	/* Send ack to spurs */
	spurs_set_rxmbox(SPD_MBOX1, data, dev);
	spurs_schedule_cmd_queue_after_response(dev);
	spd_spin_unlock(&dev->spinlock);
}

void spurs_init_command_info(struct spd_dev *dev)
{
	struct spd_command_info *info = &dev->cmd_info;
	uint64_t *slb_head;
	int i, j, entry, idx;

	atomic_set(&info->trans_count, 0);

	info->cmd_packet_num = dev->boot_info.host_param.cmd_buf_num;
	info->res_packet_num = dev->boot_info.host_param.res_buf_num;

	info->cmd_packet = spurs_get_dma_mem_area(SPD_COMMAND, dev);
	memset(info->used_cmd_packet, 0, sizeof(info->used_cmd_packet));
	INIT_REQUEST_ID(&info->sys_rid);
	INIT_CMD_QUEUE("SYS_CMD_QUEUE", &info->sys_cmd_queue,
		       spurs_dispatch_system_cmd_queue, (void *)dev);
	INIT_CMD_QUEUE("SYS_EXEC_QUEUE", &info->sys_exec_queue, NULL, NULL);
	INIT_LIST_HEAD(&info->sys_cmd_request);
	INIT_LIST_HEAD(&info->ioctl_request);
#ifdef DEBUG
	for (i = 0; i < info->cmd_packet_num; i++)
		spd_dbg(dev, "Allocating %d command packet : %p\n",
			i, &info->cmd_packet[i]);
#endif /* DEBUG */

	info->res_packet = spurs_get_dma_mem_area(SPD_RESPONSE, dev);
	for (i = 0; i < info->res_packet_num; i++) {
		info->res_packet[i].cmd_type = SPD_CMD_TYPE_DUMMY;
		spd_dbg(dev, "Allocating %d response packet : %p\n",
			i, &info->res_packet[i]);
	}

	/* Setup scatter list buffer */
	info->slb_num = 0;
	for (i = 0; i < SLB_SEG_NUM; i++)
		info->slb_num += dev->boot_info.host_param.sl_buf_num[i];

	entry = SPD_SL_ENTRY_NUM_2MB;
	if (dev->boot_info.host_param.max_trans_size == SPD_DMA_MAX_8MB)
		entry = SPD_SL_ENTRY_NUM_8MB;

	idx = 0;
	for (i = 0; i < SLB_SEG_NUM; i++) {
		if (dev->boot_info.host_param.sl_buf_num[i] == 0)
			continue;
		slb_head = spurs_get_dma_mem_area(SPD_SLB0 + i, dev);
		for (j = 0; j < dev->boot_info.host_param.sl_buf_num[i];
		     j++) {
			info->slb[idx].index = idx;
			info->slb[idx].head = slb_head;
			info->slb[idx].entry = entry;
			spd_dbg(dev,
				"Allocating %d scatter list buffer: %p\n",
				idx, slb_head);
			slb_head += entry;
			idx++;
		}
	}
	memset(info->used_slb, 0, sizeof(info->used_slb));
}
int spurs_get_request_id(struct spd_rid *rid)
{
	int ret;

	ret = find_next_zero_bit(rid->bitmap, SPD_RID_NUM, rid->next);
	if (ret >= SPD_RID_NUM)
		ret = find_first_zero_bit(rid->bitmap, SPD_RID_NUM);

	if (ret < SPD_RID_NUM) {
		set_bit(ret, rid->bitmap);
		if (ret == SPD_RID_MAX)
			rid->next = 0;
		else
			rid->next = ret + 1;
	} else
		ret = -EBUSY;

	return ret;
}

void spurs_put_request_id(int id, struct spd_rid *rid)
{
	BUG_ON(id > SPD_RID_MAX);
	clear_bit(id, rid->bitmap);
}
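/*
 * Illustrative sketch (excluded from the build): the request-ID allocator
 * above hands out IDs round-robin from a bitmap, so an ID is not reused
 * immediately and a response can always be matched to the request that owns
 * it.  The caller shown here is hypothetical.
 */
#if 0
static void example_request_id_cycle(struct spd_rid *rid)
{
	int id = spurs_get_request_id(rid);

	if (id < 0)
		return;		/* all SPD_RID_NUM IDs are in flight */

	/* ... stamp id into cmd_packet.request_id and send the command ... */

	/* Free the ID once the matching response has been processed. */
	spurs_put_request_id(id, rid);
}
#endif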
static inline void __spurs_start_cmd_queue(struct spd_cmd_queue *queue)
{
	if (queue->state == SPD_QUEUE_STATE_START)
		return;
	queue->state = SPD_QUEUE_STATE_START;
	if (!spurs_cmd_queue_empty(queue) && (queue->dispatch != NULL))
		(*queue->dispatch)(queue->data);
}

void spurs_start_session_queue(uint32_t queue, struct spd_session *session)
{
	spd_dbg(session->dev, "start command queues for session[%d]\n",
		GET_SID(session));

	if (queue & SPD_QUOTA_QUEUE)
		__spurs_start_cmd_queue(&session->quota_queue);
	if (queue & SPD_DMA_CMD_QUEUE)
		__spurs_start_cmd_queue(&session->dma_cmd_queue);
	if (queue & SPD_USER_CMD_QUEUE)
		__spurs_start_cmd_queue(&session->user_cmd_queue);
	if (queue & SPD_USER_EXEC_QUEUE)
		__spurs_start_cmd_queue(&session->exec_cmd_queue);
}

void spurs_start_cmd_queue(uint32_t queue, struct spd_dev *dev)
{
	struct spd_session *session;
	struct spd_session_info *info = &dev->session_info;

	if (IS_USER_CMD_QUEUE_FLAG(queue))
		list_for_each_entry(session, &info->session, list)
			spurs_start_session_queue(queue, session);

	if (queue & SPD_SYSTEM_CMD_QUEUE)
		__spurs_start_cmd_queue(&dev->cmd_info.sys_cmd_queue);
	if (queue & SPD_SYSTEM_EXEC_QUEUE)
		__spurs_start_cmd_queue(&dev->cmd_info.sys_exec_queue);
}

static inline void __spurs_stop_cmd_queue(struct spd_cmd_queue *queue)
{
	if (queue->state == SPD_QUEUE_STATE_STOP)
		return;
	queue->state = SPD_QUEUE_STATE_STOP;
}

void spurs_stop_session_queue(uint32_t queue, struct spd_session *session)
{
	spd_dbg(session->dev, "stop command queues for session[%d]\n",
		GET_SID(session));

	if (queue & SPD_QUOTA_QUEUE)
		__spurs_stop_cmd_queue(&session->quota_queue);
	if (queue & SPD_DMA_CMD_QUEUE)
		__spurs_stop_cmd_queue(&session->dma_cmd_queue);
	if (queue & SPD_USER_CMD_QUEUE)
		__spurs_stop_cmd_queue(&session->user_cmd_queue);
	if (queue & SPD_USER_EXEC_QUEUE)
		__spurs_stop_cmd_queue(&session->exec_cmd_queue);
}

void spurs_stop_cmd_queue(uint32_t queue, struct spd_dev *dev)
{
	struct spd_session *session;
	struct spd_session_info *info = &dev->session_info;

	if (IS_USER_CMD_QUEUE_FLAG(queue))
		list_for_each_entry(session, &info->session, list)
			spurs_stop_session_queue(queue, session);

	if (queue & SPD_SYSTEM_CMD_QUEUE)
		__spurs_stop_cmd_queue(&dev->cmd_info.sys_cmd_queue);
	if (queue & SPD_SYSTEM_EXEC_QUEUE)
		__spurs_stop_cmd_queue(&dev->cmd_info.sys_exec_queue);
}

static void __spurs_purge_cmd_queue(struct spd_cmd_queue *queue, int status,
				    int purge, struct spd_dev *dev)
{
	struct spd_cmd_request *cmd_req, *next;

	if (purge)
		queue->state = SPD_QUEUE_STATE_PURGE;
	list_for_each_entry_safe(cmd_req, next, &queue->queue, queue)
		spurs_complete_cmd_request(status, cmd_req, dev);
}

void spurs_purge_session_queue(uint32_t queue, int status, int purge,
			       struct spd_session *session)
{
	struct spd_dev *dev = session->dev;

	spd_dbg(session->dev, "purge command queues for session[%d]\n",
		GET_SID(session));

	if (queue & SPD_QUOTA_QUEUE)
		__spurs_purge_cmd_queue(&session->quota_queue,
					status, purge, dev);
	if (queue & SPD_DMA_CMD_QUEUE)
		__spurs_purge_cmd_queue(&session->dma_cmd_queue,
					status, purge, dev);
	if (queue & SPD_USER_CMD_QUEUE)
		__spurs_purge_cmd_queue(&session->user_cmd_queue,
					status, purge, dev);
	if (queue & SPD_USER_EXEC_QUEUE)
		__spurs_purge_cmd_queue(&session->exec_cmd_queue,
					status, purge, dev);
}

void spurs_purge_cmd_queue(uint32_t queue, int status, int purge,
			   struct spd_dev *dev)
{
	struct spd_session *session;
	struct spd_session_info *info = &dev->session_info;

	if (IS_USER_CMD_QUEUE_FLAG(queue))
		list_for_each_entry(session, &info->session, list)
			spurs_purge_session_queue(queue, status, purge,
						  session);

	if (queue & SPD_SYSTEM_CMD_QUEUE)
		__spurs_purge_cmd_queue(&dev->cmd_info.sys_cmd_queue,
					status, purge, dev);
	if (queue & SPD_SYSTEM_EXEC_QUEUE)
		__spurs_purge_cmd_queue(&dev->cmd_info.sys_exec_queue,
					status, purge, dev);
}
int spurs_request_user_command(unsigned int ioctl_cmd,
			       spd_ioctl_request_cmd *cmd,
			       unsigned long buff_addr, uint32_t buff_size,
			       uint32_t dma_dir, void __user *res,
			       struct spd_session *session)
{
	struct spd_dev *dev = session->dev;
	struct spd_cmd_request *cmd_req;
	struct spd_ioctl_request *ioctl_req;
	unsigned int cmd_type;
	int ret;

	if (!IS_FW_RUNNING(session->dev) && !IS_FW_INITTING(session->dev)) {
		spd_dbg(session->dev, "Firmware state is not running.\n");
		return -EPERM;
	}
	if (IS_CMD_REJECT(session)) {
		spd_dbg(session->dev, "This session rejects commands.\n");
		return -EPERM;
	}
	if (cmd->op == SP3_CC_SESSION_DELETE) {
		spd_dbg(session->dev,
			"SP3_CC_SESSION_DELETE is a system command.\n");
		return -EINVAL;
	}
	if (cmd->len > SPD_CMD_DATA_SIZE) {
		spd_dbg(session->dev, "Parameter size is invalid.\n");
		return -EINVAL;
	}

	switch (ioctl_cmd) {
	case SPD_IOCTL_REQUEST_CMD:
		cmd_type = SPD_CMD_TYPE_NORMAL;
		break;
	case SPD_IOCTL_DATA_TRANSFER:
#ifdef CONFIG_COMPAT
	case SPD_IOCTL_DATA_TRANSFER32:
#endif /* CONFIG_COMPAT */
		cmd_type = SPD_CMD_TYPE_TRANSFER;
		break;
	default:
		spd_dbg(dev, "Not supported command(0x%x)\n", ioctl_cmd);
		return -EINVAL;
	}

	cmd_req = kzalloc(sizeof(struct spd_cmd_request), GFP_KERNEL);
	if (cmd_req == NULL) {
		spd_err(dev, "Failed to allocate spd_cmd_request\n");
		return -ENOMEM;
	}

	spd_dbg(session->dev, "addr = 0x%lx, size = %d, dir=%d\n",
		buff_addr, buff_size, dma_dir);

	mutex_lock(&session->mutex);
	ret = spurs_setup_user_command(cmd_type, cmd, buff_addr, buff_size,
				       dma_dir, cmd_req, session);
	if (ret) {
		mutex_unlock(&session->mutex);
		kfree(cmd_req);
		return ret;
	}

	switch (ioctl_cmd) {
	case SPD_IOCTL_REQUEST_CMD:
		cmd_req->response.normal = res;
		break;
	case SPD_IOCTL_DATA_TRANSFER:
		cmd_req->response.dma = res;
		break;
#ifdef CONFIG_COMPAT
	case SPD_IOCTL_DATA_TRANSFER32:
		cmd_req->response.dma32 = res;
		break;
#endif /* CONFIG_COMPAT */
	}

	ret = spurs_create_ioctl_request(ioctl_cmd, cmd_req, dev, &ioctl_req);
	if (ret) {
		spurs_dma_unmap_sg(cmd_req, dev);
		kfree(cmd_req);
		ret = -ENOMEM;
		goto end;
	}
	cmd_req->ioctl_req = ioctl_req;
	ioctl_req->state = SPD_REQUEST_STATE_RUNNING;

	spd_spin_lock_irq(&session->dev->spinlock);
	list_add_tail(&cmd_req->list, &session->cmd_request);
	list_add_tail(&ioctl_req->list, &session->ioctl_request);
	ret = spurs_queue_command(&session->quota_queue, cmd_req);
	if (ret < 0) {
		/* Error occurred. */
		ioctl_req->error = -ret;
		ioctl_req->state = SPD_REQUEST_STATE_COMPLETED;
	}
	spd_spin_unlock_irq(&session->dev->spinlock);

	ret = ioctl_req->tag_id;
end:
	mutex_unlock(&session->mutex);
	return ret;
}
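/*
 * Illustrative sketch (excluded from the build): spurs_request_user_command()
 * is asynchronous.  On success it returns the tag_id of the newly created
 * ioctl request; the command itself completes later from the response path
 * (spurs_receive_response() -> spurs_complete_cmd_request()).  The caller
 * below is hypothetical and assumes a plain, non-DMA request for which the
 * buffer address, size and direction arguments are not used.
 */
#if 0
static int example_issue_user_command(spd_ioctl_request_cmd *cmd,
				      void __user *res,
				      struct spd_session *session)
{
	int tag = spurs_request_user_command(SPD_IOCTL_REQUEST_CMD, cmd,
					     0, 0, 0, res, session);

	if (tag < 0)
		return tag;	/* setup or queueing failed synchronously */

	/* tag identifies the request until its completion is collected. */
	return tag;
}
#endif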
int spurs_request_system_command(spd_ioctl_request_syscmd *cmd,
				 spd_ioctl_request_syscmd *res,
				 struct spd_dev *dev)
{
	struct spd_cmd_request *cmd_req;
	struct spd_ioctl_request *ioctl_req;
	struct spd_command_info *info = &dev->cmd_info;
	spd_cmd_t *cmd_pkt;
	int tag, ret, error;

	if (!IS_FW_RUNNING(dev) && !IS_FW_INITTING(dev)) {
		spd_dbg(dev, "Firmware state is not running.\n");
		return -EPERM;
	}
	if ((cmd == NULL) || (res == NULL)) {
		spd_dbg(dev, "Invalid data address\n");
		return -EINVAL;
	}
	if (cmd->len > SPD_CMD_DATA_SIZE) {
		spd_dbg(dev, "Parameter size is invalid.\n");
		return -EINVAL;
	}

	cmd_req = kzalloc(sizeof(struct spd_cmd_request), GFP_KERNEL);
	if (cmd_req == NULL) {
		spd_err(dev, "Failed to allocate spd_cmd_request\n");
		return -ENOMEM;
	}

	mutex_lock(&dev->mutex);
	ret = spurs_create_ioctl_request(SPD_IOCTL_REQUEST_SYSCMD, cmd_req,
					 dev, &ioctl_req);
	if (ret) {
		kfree(cmd_req);
		mutex_unlock(&dev->mutex);
		return -ENOMEM;
	}
	tag = ioctl_req->tag_id;

	cmd_req->state = SPD_CMD_STATE_SETUP;
	cmd_req->cmd_type = SPD_CMD_TYPE_SYS;
	cmd_pkt = &cmd_req->cmd_packet;
	cmd_pkt->session_id = 0;
	cmd_pkt->cmd_type = SPD_CMD_TYPE_SYSTEM;
	cmd_pkt->op = cmd->op;
	cmd_pkt->data_size = cmd->len;
	if (cmd->len > 0)
		memcpy(cmd_pkt->data, cmd->data, cmd->len);
	cmd_req->ioctl_req = ioctl_req;
	ioctl_req->state = SPD_REQUEST_STATE_RUNNING;

	spd_spin_lock_irq(&dev->spinlock);
	list_add_tail(&cmd_req->list, &info->sys_cmd_request);
	list_add_tail(&ioctl_req->list, &info->ioctl_request);

	/* Execute command */
	cmd_req->state = SPD_CMD_STATE_QUEUING;
	ret = spurs_queue_command(&info->sys_cmd_queue, cmd_req);
	if (ret < 0) {
		/* Error occurred. */
		ioctl_req->error = -ret;
		ioctl_req->state = SPD_REQUEST_STATE_COMPLETED;
		spd_spin_unlock_irq(&dev->spinlock);
		mutex_unlock(&dev->mutex);
		goto end;
	}
	spd_spin_unlock_irq(&dev->spinlock);
	mutex_unlock(&dev->mutex);

	/* Wait to complete command */
	ret = spurs_wait_system_command_request(tag,
						SPD_REQUEST_SYSCMD_TIMEOUT,
						dev);
	if (ret != 0)
		goto end;

	ret = spurs_get_request_result(ioctl_req, dev, &error);
	if (ret != 0)
		goto end;
	ret = error;
	memcpy(res, &cmd_req->response.syscmd,
	       sizeof(spd_ioctl_request_syscmd));
end:
	spd_spin_lock_irq(&dev->spinlock);
	if (ret != 0)
		spurs_remove_cmd_request(cmd_req);
	list_del_init(&cmd_req->list);
	list_del_init(&ioctl_req->list);
	spd_spin_unlock_irq(&dev->spinlock);

	kfree(cmd_req);
	spurs_delete_ioctl_request(ioctl_req, dev);

	return ret;
}

int spurs_request_debug_command(uint16_t op, spd_ioctl_syscmd_dbg *cmd,
				spd_ioctl_syscmd_dbg *res,
				struct spd_dev *dev)
{
	struct spd_cmd_request *cmd_req;
	struct spd_ioctl_request *ioctl_req;
	struct spd_command_info *info = &dev->cmd_info;
	spd_debug_cmd_t *cmd_pkt;
	int tag, ret, error;

	if (!IS_FW_RUNNING(dev) && !IS_FW_INITTING(dev)) {
		spd_err(dev, "Firmware state is not running.\n");
		return -EPERM;
	}
	if ((op & 0xFFF0) != 0xFFF0) {
		spd_dbg(dev, "Operation is not for debug.\n");
		return -EINVAL;
	}
	if (cmd->len > SPD_DBG_TRACE_DATA_LEN) {
		spd_dbg(dev, "Parameter size is invalid.\n");
		return -EINVAL;
	}

	cmd_req = kzalloc(sizeof(struct spd_cmd_request), GFP_KERNEL);
	if (cmd_req == NULL) {
		spd_err(dev, "Failed to allocate spd_cmd_request\n");
		return -ENOMEM;
	}

	mutex_lock(&dev->mutex);
	ret = spurs_create_ioctl_request(SPD_IOCTL_REQUEST_DBGCMD, cmd_req,
					 dev, &ioctl_req);
	if (ret) {
		kfree(cmd_req);
		mutex_unlock(&dev->mutex);
		return -ENOMEM;
	}
	tag = ioctl_req->tag_id;

	cmd_req->state = SPD_CMD_STATE_SETUP;
	cmd_req->cmd_type = SPD_CMD_TYPE_DEBUG;
	cmd_pkt = (spd_debug_cmd_t *)&cmd_req->cmd_packet;
	cmd_pkt->session_id = 0;
	cmd_pkt->cmd_type = SPD_CMD_TYPE_SYSTEM;
	cmd_pkt->op = op;
	cmd_pkt->trigger_code = cmd->code;
	cmd_pkt->trigger_mask = cmd->mask;
	cmd_pkt->data_size = cmd->len;
	if (cmd->len > 0)
		memcpy(cmd_pkt->data, cmd->data, cmd->len);
	cmd_req->ioctl_req = ioctl_req;
	ioctl_req->state = SPD_REQUEST_STATE_RUNNING;

	spd_spin_lock_irq(&dev->spinlock);
	list_add_tail(&cmd_req->list, &info->sys_cmd_request);
	list_add_tail(&ioctl_req->list, &info->ioctl_request);

	/* Execute command */
	cmd_req->state = SPD_CMD_STATE_QUEUING;
	ret = spurs_queue_command(&info->sys_cmd_queue, cmd_req);
	if (ret < 0) {
		/* Error occurred. */
		ioctl_req->error = -ret;
		ioctl_req->state = SPD_REQUEST_STATE_COMPLETED;
	}
	spd_spin_unlock_irq(&dev->spinlock);
	mutex_unlock(&dev->mutex);

	/* Wait to complete command */
	ret = spurs_wait_system_command_request(
		tag, SPD_REQUEST_SYSCMD_TIMEOUT, dev);
	mutex_lock(&dev->mutex);
	if (ret != 0)
		goto end;

	ret = spurs_get_request_result(ioctl_req, dev, &error);
	if (ret)
		goto end;
	ret = error;
	memcpy(res, &cmd_req->response.dbgcmd, sizeof(spd_ioctl_syscmd_dbg));
end:
	spd_spin_lock_irq(&dev->spinlock);
	list_del_init(&cmd_req->queue);
	list_del_init(&cmd_req->list);
	list_del_init(&ioctl_req->list);
	spd_spin_unlock_irq(&dev->spinlock);

	kfree(cmd_req);
	spurs_delete_ioctl_request(ioctl_req, dev);
	mutex_unlock(&dev->mutex);

	return ret;
}