gxp: [Copybara Auto Merge] Merge branch 'zumapro-u' into 'android14-gs-pixel-6.1-zuma-pro'
gxp: Kconfig use GPL 2.0-only license
gcip: update comments of handle_awaiter_arrived
Bug: 317672292
gcip: introduce orig_dir under gcip_iommu_mapping
Bug: 302127145
gcip: introduce after_unmap operator
Bug: 291038199
GCIP_HEADER_REV_ID: 0400e310a4ea4fa09c4f72b61bdd4a87d35c0086
gxp: acquire pm on debugfs mailbox
Bug: 317850970
gxp: introduce MCU boot in recovery mode
Bug: 305146624
Bug: 305611797
gxp: GEM5 also needs TIME_DELAY_FACTOR change.
Bug: 278475783
gxp: handle UCI responses in IRQ handler
Bug: 316074256
Revert^2 "gxp: change wait_list_lock to spinlock"
Revert^2 "gxp: change resp_queue to spinlock"
gxp: use spin_lock_irq* for wait queue lock
Bug: 316074256
Bug: 317160980
Revert "gxp: change resp_queue to spinlock"
Revert "gxp: change wait_list_lock to spinlock"
gxp: remove the DMA range limit in gxp_dma_init
Bug: 303339361
gxp: change wait_list_lock to spinlock
Bug: 316074256
gxp: change resp_queue to spinlock
Bug: 316074256
gxp: increase number of VD allocations to 15
Bug: 316834877
gcip: move awaiter releasing out of spinlock
Bug: 317672292
gcip: use the adjusted dir for dma_buf_map_attachment
Bug: 302127145
gcip: set orig_dir correctly in buffer mapping
Bug: 302127145
gcip: distinguish dir and orig_dir in mapping
Bug: 302127145
gcip: fix returning wrong error pointer
gcip: introduce after_unmap operator
Bug: 291038199
GCIP_MAIN_REV_ID: b0814de8d1292bee416a0ff3f28ff4c55d4386c5
Signed-off-by: Aurora zuma pro automerger <[email protected]>
GitOrigin-RevId: 555b80ee2055fa3031ba2f09438dd014717c4a29
Change-Id: Id00eff240f02f5f364fa751dfd9dc02043ea728b
diff --git a/Kconfig b/Kconfig
new file mode 100644
index 0000000..174b3e4
--- /dev/null
+++ b/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "GXP device"
+
+config GXP
+ tristate "Device driver for GXP"
+ select DMA_SHARED_BUFFER
+ select GENERIC_ALLOCATOR
+ select IOMMU_API
+ select PM
+ help
+ This driver supports the GXP device. Say Y if you want to
+ include this driver in the kernel.
+
+ To compile this driver as a module, choose M here. The module will be
+ called "gxp".
+
+config CALLISTO
+ tristate "Callisto GXP device driver"
+ depends on GXP
+ help
+ This driver supports the Callisto device. Say Y if you want to
+ include this driver in the kernel.
+
+ To compile this driver as a module, choose M here. The module will be
+ called "gxp".
+
+
+endmenu
diff --git a/callisto/config.h b/callisto/config.h
index f9410ce..720657c 100644
--- a/callisto/config.h
+++ b/callisto/config.h
@@ -35,7 +35,7 @@
/* Size of slice per VD. */
#define GXP_SHARED_SLICE_SIZE SZ_32K
/* The max number of active virtual devices. */
-#define GXP_NUM_SHARED_SLICES 7
+#define GXP_NUM_SHARED_SLICES 15
/*
* Can be coherent with AP
diff --git a/callisto/iova.h b/callisto/iova.h
index 52751b7..c09637b 100644
--- a/callisto/iova.h
+++ b/callisto/iova.h
@@ -24,6 +24,8 @@
#define GXP_IREMAP_SECURE_OFFSET GXP_IREMAP_CODE_SIZE
#define GXP_IREMAP_SECURE_SIZE 0x100000 /* 1MB */
#define GXP_IREMAP_DATA_OFFSET (GXP_IREMAP_SECURE_OFFSET + GXP_IREMAP_SECURE_SIZE)
-#define GXP_IREMAP_DATA_SIZE 0x200000 /* 2MB */
+#define GXP_IREMAP_DATA_SIZE 0x1FF000 /* (2MB - 4KB) */
+/* Last 4K page of IREMAP_DATA */
+#define GXP_MCU_BOOT_MODE_OFFSET (GXP_IREMAP_DATA_OFFSET + GXP_IREMAP_DATA_SIZE)
#endif /* __CALLISTO_IOVA_H__ */
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-iommu.c b/gcip-kernel-driver/drivers/gcip/gcip-iommu.c
index 1ca5bcc..17e3e82 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-iommu.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-iommu.c
@@ -503,7 +503,7 @@
}
dma_buf_unmap_attachment(dmabuf_mapping->dma_buf_attachment, dmabuf_mapping->sgt_default,
- mapping->dir);
+ mapping->orig_dir);
dma_buf_detach(dmabuf_mapping->dma_buf, dmabuf_mapping->dma_buf_attachment);
dma_buf_put(dmabuf_mapping->dma_buf);
kfree(dmabuf_mapping);
@@ -903,7 +903,7 @@
container_of(mapping, struct gcip_iommu_dma_buf_mapping, mapping);
seq_printf(s, " %pad %lu %s %s %pad", &mapping->device_address,
- DIV_ROUND_UP(mapping->size, PAGE_SIZE), dma_dir_tbl[mapping->dir],
+ DIV_ROUND_UP(mapping->size, PAGE_SIZE), dma_dir_tbl[mapping->orig_dir],
dmabuf_mapping->dma_buf->exp_name,
&sg_dma_address(dmabuf_mapping->sgt_default->sgl));
entry_show_dma_addrs(mapping, s);
@@ -1046,7 +1046,7 @@
* @domain: The desired IOMMU domain where the sgt should be mapped.
* @sgt: The scatter-gather table to map to the target IOMMU domain.
* @size: The size of the buffer.
- * @dir: DMA direction.
+ * @orig_dir: The original DMA direction that the user tried to map.
* @offset: The offset of the start address.
* @iova: The target IOVA to map @sgt. If it is 0, this function allocates an IOVA space.
* @gcip_map_flags: The flags used to create the mapping, which can be encoded with
@@ -1057,7 +1057,7 @@
*/
static struct gcip_iommu_mapping *
gcip_iommu_domain_map_buffer_sgt(struct gcip_iommu_domain *domain, struct sg_table *sgt,
- size_t size, enum dma_data_direction dir, ulong offset,
+ size_t size, enum dma_data_direction orig_dir, ulong offset,
dma_addr_t iova, u64 gcip_map_flags)
{
struct gcip_iommu_mapping *mapping;
@@ -1069,13 +1069,12 @@
mapping->domain = domain;
mapping->sgt = sgt;
- mapping->gcip_map_flags = gcip_map_flags;
mapping->type = GCIP_IOMMU_MAPPING_BUFFER;
mapping->size = size;
- mapping->dir = dir;
+ mapping->orig_dir = orig_dir;
mapping->user_specified_daddr = iova;
- ret = gcip_iommu_domain_map_sgt_to_iova(domain, sgt, iova, &mapping->gcip_map_flags);
+ ret = gcip_iommu_domain_map_sgt_to_iova(domain, sgt, iova, &gcip_map_flags);
if (!ret) {
ret = -ENOSPC;
dev_err(domain->dev, "Failed to map sgt to domain (ret=%d)\n", ret);
@@ -1085,6 +1084,8 @@
mmgrab(current->mm);
mapping->owning_mm = current->mm;
mapping->device_address = sg_dma_address(sgt->sgl) + offset;
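+ /*
+ * @gcip_map_flags may have been adjusted while mapping the sgt; store the
+ * flags and the DMA direction that were actually used.
+ */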
+ mapping->gcip_map_flags = gcip_map_flags;
+ mapping->dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
return mapping;
@@ -1100,7 +1101,7 @@
* @dmabuf: The shared dma-buf object.
* @attachment: The device attachment of @dmabuf.
* @sgt: The scatter-gather table to map to the target IOMMU domain.
- * @dir: DMA direction.
+ * @orig_dir: The original DMA direction that the user tried to map.
* @iova: The target IOVA to map @sgt. If it is 0, this function allocates an IOVA space.
* @gcip_map_flags: The flags used to create the mapping, which can be encoded with
* gcip_iommu_encode_gcip_map_flags() or `GCIP_MAP_FLAGS_DMA_*_TO_FLAGS` macros.
@@ -1111,7 +1112,8 @@
static struct gcip_iommu_mapping *
gcip_iommu_domain_map_dma_buf_sgt(struct gcip_iommu_domain *domain, struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment, struct sg_table *sgt,
- enum dma_data_direction dir, dma_addr_t iova, u64 gcip_map_flags)
+ enum dma_data_direction orig_dir, dma_addr_t iova,
+ u64 gcip_map_flags)
{
struct gcip_iommu_dma_buf_mapping *dmabuf_mapping;
struct gcip_iommu_mapping *mapping;
@@ -1129,8 +1131,7 @@
mapping->domain = domain;
mapping->size = dmabuf->size;
mapping->type = GCIP_IOMMU_MAPPING_DMA_BUF;
- mapping->dir = dir;
- mapping->gcip_map_flags = gcip_map_flags;
+ mapping->orig_dir = orig_dir;
mapping->user_specified_daddr = iova;
if (domain->default_domain) {
@@ -1147,8 +1148,8 @@
goto err_copy_sgt;
}
- nents_mapped = gcip_iommu_domain_map_sgt_to_iova(domain, mapping->sgt, iova,
- &mapping->gcip_map_flags);
+ nents_mapped =
+ gcip_iommu_domain_map_sgt_to_iova(domain, mapping->sgt, iova, &gcip_map_flags);
if (!nents_mapped) {
ret = -ENOSPC;
dev_err(domain->dev, "Failed to map dmabuf to IOMMU domain (ret=%d)\n", ret);
@@ -1156,6 +1157,8 @@
}
mapping->device_address = sg_dma_address(mapping->sgt->sgl);
+ mapping->gcip_map_flags = gcip_map_flags;
+ mapping->dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
return mapping;
@@ -1173,7 +1176,7 @@
struct mutex *pin_user_pages_lock)
{
struct gcip_iommu_mapping *mapping;
- enum dma_data_direction dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
+ enum dma_data_direction orig_dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
uint num_pages = 0;
struct page **pages;
ulong offset;
@@ -1181,7 +1184,7 @@
struct sg_table *sgt;
uint gup_flags = gcip_iommu_get_gup_flags(host_address, domain->dev);
- if (!valid_dma_direction(dir))
+ if (!valid_dma_direction(orig_dir))
return ERR_PTR(-EINVAL);
if (size == 0)
@@ -1206,7 +1209,6 @@
}
if (!(gup_flags & FOLL_WRITE)) {
- dir = DMA_TO_DEVICE;
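+ /* Pages were pinned without FOLL_WRITE; force the DMA direction to DMA_TO_DEVICE. */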
gcip_map_flags &= ~(((BIT(GCIP_MAP_FLAGS_DMA_DIRECTION_BIT_SIZE) - 1)
<< GCIP_MAP_FLAGS_DMA_DIRECTION_OFFSET));
gcip_map_flags |= GCIP_MAP_FLAGS_DMA_DIRECTION_TO_FLAGS(DMA_TO_DEVICE);
@@ -1225,7 +1227,7 @@
goto err_free_table;
}
- mapping = gcip_iommu_domain_map_buffer_sgt(domain, sgt, size, dir, offset, iova,
+ mapping = gcip_iommu_domain_map_buffer_sgt(domain, sgt, size, orig_dir, offset, iova,
gcip_map_flags);
if (IS_ERR(mapping)) {
ret = PTR_ERR(mapping);
@@ -1270,14 +1272,19 @@
struct dma_buf_attachment *attachment;
struct sg_table *sgt;
struct gcip_iommu_mapping *mapping;
- enum dma_data_direction dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
+ enum dma_data_direction orig_dir;
+ enum dma_data_direction dir;
int ret;
- if (!valid_dma_direction(dir)) {
- dev_err(dev, "Invalid dma data direction (dir=%d)\n", dir);
+ orig_dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
+ if (!valid_dma_direction(orig_dir)) {
+ dev_err(dev, "Invalid dma data direction (dir=%d)\n", orig_dir);
return ERR_PTR(-EINVAL);
}
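+ /*
+ * Adjust the direction for platform limitations; the adjusted @dir is what
+ * dma_buf_map_attachment() will use (b/302127145).
+ */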
+ gcip_map_flags_adjust_dir(&gcip_map_flags);
+ dir = GCIP_MAP_FLAGS_GET_DMA_DIRECTION(gcip_map_flags);
+
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf)) {
dev_err(dev, "Failed to get dma-buf (ret=%ld)\n", PTR_ERR(dmabuf));
@@ -1304,10 +1311,10 @@
goto err_map_attachment;
}
- mapping = gcip_iommu_domain_map_dma_buf_sgt(domain, dmabuf, attachment, sgt, dir, iova,
+ mapping = gcip_iommu_domain_map_dma_buf_sgt(domain, dmabuf, attachment, sgt, orig_dir, iova,
gcip_map_flags);
if (IS_ERR(mapping)) {
- ret = PTR_ERR(sgt);
+ ret = PTR_ERR(mapping);
goto err_map_dma_buf_sgt;
}
@@ -1330,11 +1337,19 @@
void gcip_iommu_mapping_unmap(struct gcip_iommu_mapping *mapping)
{
+ void *data = mapping->data;
+ const struct gcip_iommu_mapping_ops *ops = mapping->ops;
+
if (mapping->type == GCIP_IOMMU_MAPPING_BUFFER) {
gcip_iommu_mapping_unmap_buffer(mapping);
} else if (mapping->type == GCIP_IOMMU_MAPPING_DMA_BUF) {
gcip_iommu_mapping_unmap_dma_buf(mapping);
}
+
+ /* From now on, @mapping is released and must not be accessed. */
+
+ if (ops && ops->after_unmap)
+ ops->after_unmap(data);
}
dma_addr_t gcip_iommu_alloc_iova(struct gcip_iommu_domain *domain, size_t size, u64 gcip_map_flags)
diff --git a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index 162731f..b170a29 100644
--- a/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -237,7 +237,7 @@
static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
{
struct gcip_mailbox_wait_list_elem *cur, *nxt;
- struct gcip_mailbox_resp_awaiter *awaiter;
+ struct gcip_mailbox_resp_awaiter *awaiter = NULL;
unsigned long flags;
u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
@@ -254,38 +254,34 @@
cur->async_resp->status = GCIP_MAILBOX_STATUS_OK;
memcpy(cur->async_resp->resp, resp, mailbox->resp_elem_size);
list_del(&cur->list);
- if (cur->awaiter) {
- awaiter = cur->awaiter;
-
+ awaiter = cur->awaiter;
+ if (awaiter)
/*
* The timeout handler will be fired, but is pended while waiting to acquire
* the wait_list_lock.
*/
TEST_TRIGGER_TIMEOUT_RACE(awaiter);
-
- /*
- * If canceling timeout_work succeeded, we have to decrease the reference
- * count here because the timeout handler will not be* called. Otherwise,
- * the timeout handler is already canceled or pending by race. If it is
- * canceled, the count must be decreased already, and if it is pending, the
- * timeout handler will decrease the awaiter reference.
- */
- if (cancel_delayed_work(&awaiter->timeout_work))
- gcip_mailbox_awaiter_dec_refs(awaiter);
- /*
- * If `handle_awaiter_arrived` callback is defined, @awaiter will be
- * released from the implementation side. Otherwise, it should be freed from
- * here.
- */
- if (mailbox->ops->handle_awaiter_arrived)
- mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
- gcip_mailbox_awaiter_dec_refs(awaiter);
- }
kfree(cur);
break;
}
RELEASE_WAIT_LIST_LOCK(true, flags);
+
+ if (!awaiter)
+ return;
+
+ /*
+ * If canceling timeout_work succeeded, we have to decrease the reference count here because
+ * the timeout handler will not be called. Otherwise, the timeout handler is already
+ * canceled or pending by race. If it is canceled, the count must be decreased already, and
+ * if it is pending, the timeout handler will decrease the awaiter reference.
+ */
+ if (cancel_delayed_work(&awaiter->timeout_work))
+ gcip_mailbox_awaiter_dec_refs(awaiter);
+ if (mailbox->ops->handle_awaiter_arrived)
+ mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
+ /* Remove the reference held for the arrived handler. */
+ gcip_mailbox_awaiter_dec_refs(awaiter);
}
/*
diff --git a/gcip-kernel-driver/include/gcip/gcip-iommu.h b/gcip-kernel-driver/include/gcip/gcip-iommu.h
index d378a31..7169700 100644
--- a/gcip-kernel-driver/include/gcip/gcip-iommu.h
+++ b/gcip-kernel-driver/include/gcip/gcip-iommu.h
@@ -103,6 +103,17 @@
GCIP_IOMMU_MAPPING_DMA_BUF,
};
+/* Operators for `struct gcip_iommu_mapping`. */
+struct gcip_iommu_mapping_ops {
+ /*
+ * Called after the corresponding mapping of @data is unmapped and released. Since its
+ * `struct gcip_iommu_mapping` instance is released, it won't be passed to the callback.
+ *
+ * This callback is optional.
+ */
+ void (*after_unmap)(void *data);
+};
+
/**
* struct gcip_iommu_mapping - Contains the information of sgt mapping to the domain.
* @type: Type of the mapping.
@@ -113,14 +124,21 @@
* information to the given domain received from the custom IOVA allocator.
* If the given domain is the default domain, the pointer will be set to the sgt received from
* default allocator.
- * @dir: The data direction that user tried to map.
- * This value may be different from the one encoded in gcip_map_flags.
+ * @dir: The DMA data direction, possibly adjusted due to system or hardware limits.
+ * This is the actual direction used for the mapping and should be the same as
+ * the one encoded in gcip_map_flags.
+ * This field should be used in revert functions and DMA sync functions.
+ * @orig_dir: The DMA data direction that the user originally tried to map.
+ * This value may be different from the one encoded in gcip_map_flags.
+ * This field should be used for logging to the user, hiding the underlying adjustment.
* @gcip_map_flags: The flags used to create the mapping, which can be encoded with
* gcip_iommu_encode_gcip_map_flags() or `GCIP_MAP_FLAGS_DMA_*_TO_FLAGS` macros.
* @owning_mm: For holding a reference to MM.
* @user_specified_daddr: If true, the IOVA address was specified by the user via the `*_to_iova`
* mapping functions and will not be freed when the mapping is unmapped.
* It is the user's responsibility to manage the IOVA region.
+ * @ops: User-defined operators.
+ * @data: User-defined data.
*/
struct gcip_iommu_mapping {
enum gcip_iommu_mapping_type type;
@@ -130,6 +148,7 @@
uint num_pages;
struct sg_table *sgt;
enum dma_data_direction dir;
+ enum dma_data_direction orig_dir;
u64 gcip_map_flags;
/*
* TODO(b/302510715): Use another wrapper struct to contain this because it is used in
@@ -137,6 +156,8 @@
*/
struct mm_struct *owning_mm;
bool user_specified_daddr;
+ const struct gcip_iommu_mapping_ops *ops;
+ void *data;
};
/*
@@ -465,4 +486,15 @@
*/
void gcip_iommu_free_iova(struct gcip_iommu_domain *domain, dma_addr_t iova, size_t size);
+static inline void gcip_iommu_mapping_set_ops(struct gcip_iommu_mapping *mapping,
+ const struct gcip_iommu_mapping_ops *ops)
+{
+ mapping->ops = ops;
+}
+
+static inline void gcip_iommu_mapping_set_data(struct gcip_iommu_mapping *mapping, void *data)
+{
+ mapping->data = data;
+}
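+
+/*
+ * Usage sketch (names hypothetical): register a cleanup callback that runs
+ * after the mapping has been unmapped and released.
+ *
+ *   static void my_after_unmap(void *data)
+ *   {
+ *           kfree(data);
+ *   }
+ *
+ *   static const struct gcip_iommu_mapping_ops my_ops = {
+ *           .after_unmap = my_after_unmap,
+ *   };
+ *
+ *   gcip_iommu_mapping_set_ops(mapping, &my_ops);
+ *   gcip_iommu_mapping_set_data(mapping, my_data);
+ */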
+
#endif /* __GCIP_IOMMU_H__ */
diff --git a/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index 4a1c823..502d297 100644
--- a/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -320,7 +320,7 @@
* Handles an asynchronous response that has arrived successfully. How to handle it depends on the
* chip implementation. However, @awaiter should be released by calling the
* `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
- * @awaiter anymore. This is called with the `wait_list_lock` being held.
+ * @awaiter anymore.
* Context: normal and in_interrupt().
*/
void (*handle_awaiter_arrived)(struct gcip_mailbox *mailbox,
diff --git a/gxp-config.h b/gxp-config.h
index ed49644..56a84bf 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -20,7 +20,7 @@
#endif /* unknown */
-#if IS_ENABLED(CONFIG_GXP_ZEBU)
+#if IS_ENABLED(CONFIG_GXP_ZEBU) || IS_ENABLED(CONFIG_GXP_GEM5)
#define GXP_TIME_DELAY_FACTOR 100
#elif IS_ENABLED(CONFIG_GXP_IP_ZEBU)
#define GXP_TIME_DELAY_FACTOR 500
@@ -60,6 +60,10 @@
#define GXP_ENABLE_DEBUG_DUMP 1
#endif
+#ifndef GXP_LPM_IN_AON
+#define GXP_LPM_IN_AON 0
+#endif
+
#define GXP_DEBUG_DUMP_IOVA_BASE (0xF5000000)
#define GXP_TELEMETRY_IOVA_BASE (0xF6000000)
diff --git a/gxp-dci.c b/gxp-dci.c
index 819b9ce..194bec6 100644
--- a/gxp-dci.c
+++ b/gxp-dci.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <uapi/linux/sched/types.h>
#include "gxp-dci.h"
@@ -390,7 +391,7 @@
mailbox->resp_queue_size = MBOX_RESP_QUEUE_NUM_ENTRIES;
mailbox->resp_queue_head = 0;
- mutex_init(&mailbox->resp_queue_lock);
+ spin_lock_init(&mailbox->resp_queue_lock);
/* Allocate and initialize the mailbox descriptor */
ret = gxp_dma_alloc_coherent_buf(mailbox->gxp, vd->domain,
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 4f6d246..fac5cb5 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -140,8 +140,8 @@
struct gxp_dma_iommu_manager *mgr;
int ret;
- /* GXP can only address 32-bit IOVAs */
- ret = dma_set_mask_and_coherent(gxp->dev, DMA_BIT_MASK(32));
+ /* Remove the 32-bit limit on DMA ranges. */
+ ret = dma_set_mask_and_coherent(gxp->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(gxp->dev, "Failed to set DMA mask\n");
return ret;
diff --git a/gxp-mailbox-driver.c b/gxp-mailbox-driver.c
index 323b4bb..d04a0a2 100644
--- a/gxp-mailbox-driver.c
+++ b/gxp-mailbox-driver.c
@@ -416,7 +416,13 @@
{
struct gxp_mailbox *gxp_mbx = mailbox->data;
- mutex_lock(&gxp_mbx->resp_queue_lock);
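+ /* resp_queue_lock is now a spinlock; report atomic context to GCIP. */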
+ *atomic = true;
+
+ if (try)
+ return spin_trylock_irqsave(&gxp_mbx->resp_queue_lock,
+ gxp_mbx->resp_queue_lock_flags);
+
+ spin_lock_irqsave(&gxp_mbx->resp_queue_lock, gxp_mbx->resp_queue_lock_flags);
return 1;
}
@@ -424,7 +430,7 @@
{
struct gxp_mailbox *gxp_mbx = mailbox->data;
- mutex_unlock(&gxp_mbx->resp_queue_lock);
+ spin_unlock_irqrestore(&gxp_mbx->resp_queue_lock, gxp_mbx->resp_queue_lock_flags);
}
void gxp_mailbox_gcip_ops_acquire_wait_list_lock(struct gcip_mailbox *mailbox,
@@ -433,7 +439,7 @@
{
struct gxp_mailbox *gxp_mbx = mailbox->data;
- mutex_lock(&gxp_mbx->wait_list_lock);
+ spin_lock_irqsave(&gxp_mbx->wait_list_lock, *flags);
}
void gxp_mailbox_gcip_ops_release_wait_list_lock(struct gcip_mailbox *mailbox,
@@ -442,7 +448,7 @@
{
struct gxp_mailbox *gxp_mbx = mailbox->data;
- mutex_unlock(&gxp_mbx->wait_list_lock);
+ spin_unlock_irqrestore(&gxp_mbx->wait_list_lock, flags);
}
int gxp_mailbox_gcip_ops_wait_for_cmd_queue_not_full(
diff --git a/gxp-mailbox-manager.c b/gxp-mailbox-manager.c
index 630dace..bb74fcb 100644
--- a/gxp-mailbox-manager.c
+++ b/gxp-mailbox-manager.c
@@ -13,6 +13,8 @@
#include "gxp-mcu-platform.h"
#endif
+#include <gcip/gcip-pm.h>
+
#define DEBUGFS_MAILBOX "mailbox"
static int debugfs_mailbox_execute_cmd(void *data, u64 val)
@@ -29,6 +31,11 @@
u16 cmd_code;
int ret;
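+ /* Power on the block before sending the mailbox command; the ref is put on exit. */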
+ ret = gcip_pm_get(gxp->power_mgr->pm);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to power up block %d", ret);
+ return ret;
+ }
mutex_lock(&gxp->debugfs_client_lock);
client = gxp->debugfs_client;
@@ -84,7 +91,8 @@
cmd_code = CORE_COMMAND;
#else
dev_err(gxp->dev, "This platform only supports direct-mode.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
#endif /* GXP_HAS_MCU */
}
@@ -101,6 +109,7 @@
if (client && client != gxp->debugfs_client)
gxp_client_destroy(client);
mutex_unlock(&gxp->debugfs_client_lock);
+ gcip_pm_put(gxp->power_mgr->pm);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(debugfs_mailbox_fops, NULL,
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 3d37977..cdbca66 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -12,6 +12,7 @@
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <uapi/linux/sched/types.h>
#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
@@ -52,15 +53,8 @@
#if GXP_USE_LEGACY_MAILBOX
gxp_mailbox_consume_responses(mailbox);
#else
- switch (mailbox->type) {
- case GXP_MBOX_TYPE_GENERAL:
- gcip_mailbox_consume_responses_work(mailbox->mbx_impl.gcip_mbx);
- break;
- case GXP_MBOX_TYPE_KCI:
- gxp_mcu_telemetry_irq_handler(
- ((struct gxp_kci *)mailbox->data)->mcu);
- break;
- }
+ if (mailbox->type == GXP_MBOX_TYPE_KCI)
+ gxp_mcu_telemetry_irq_handler(((struct gxp_kci *)mailbox->data)->mcu);
#endif
}
@@ -72,10 +66,15 @@
static void gxp_mailbox_handle_irq(struct gxp_mailbox *mailbox)
{
#if !GXP_USE_LEGACY_MAILBOX
- if (mailbox->type == GXP_MBOX_TYPE_KCI)
+ if (mailbox->type == GXP_MBOX_TYPE_KCI) {
gcip_kci_handle_irq(mailbox->mbx_impl.gcip_kci);
-#endif
+ kthread_queue_work(&mailbox->response_worker, &mailbox->response_work);
+ } else if (mailbox->type == GXP_MBOX_TYPE_GENERAL) {
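+ /* Consume general (UCI) mailbox responses directly in IRQ context (b/316074256). */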
+ gcip_mailbox_consume_responses_work(mailbox->mbx_impl.gcip_mbx);
+ }
+#else
kthread_queue_work(&mailbox->response_worker, &mailbox->response_work);
+#endif
}
/* Priority level for realtime worker threads */
@@ -159,7 +158,7 @@
goto err_allocate_resources;
mutex_init(&mailbox->cmd_queue_lock);
- mutex_init(&mailbox->resp_queue_lock);
+ spin_lock_init(&mailbox->resp_queue_lock);
kthread_init_worker(&mailbox->response_worker);
mailbox->response_thread = create_response_rt_thread(
mailbox->gxp->dev, &mailbox->response_worker, core_id);
@@ -336,7 +335,7 @@
return ret;
mailbox->handle_irq = gxp_mailbox_handle_irq;
- mutex_init(&mailbox->wait_list_lock);
+ spin_lock_init(&mailbox->wait_list_lock);
kthread_init_work(&mailbox->response_work,
gxp_mailbox_consume_responses_work);
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index afe9bc3..2fb4162 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -8,6 +8,7 @@
#define __GXP_MAILBOX_H__
#include <linux/kthread.h>
+#include <linux/spinlock.h>
#include "gxp-client.h"
#include "gxp-config.h" /* GXP_USE_LEGACY_MAILBOX */
@@ -188,10 +189,11 @@
struct gxp_coherent_buf resp_queue_buf;
u32 resp_queue_size; /* size of resp queue */
u32 resp_queue_head; /* offset within the resp queue */
- struct mutex resp_queue_lock; /* protects resp_queue */
+ spinlock_t resp_queue_lock; /* protects resp_queue */
+ unsigned long resp_queue_lock_flags; /* to store IRQ flags */
/* commands which need to wait for responses will be added to the wait_list */
- struct mutex wait_list_lock; /* protects wait_list */
+ spinlock_t wait_list_lock; /* protects wait_list */
/* to create our own realtime worker for handling responses */
struct kthread_worker response_worker;
struct task_struct *response_thread;
diff --git a/gxp-mcu-firmware.c b/gxp-mcu-firmware.c
index 507379d..0a8ac9d 100644
--- a/gxp-mcu-firmware.c
+++ b/gxp-mcu-firmware.c
@@ -5,6 +5,7 @@
* Copyright (C) 2022 Google LLC
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>
@@ -56,6 +57,8 @@
/* The number of times trying to rescue MCU. */
#define MCU_RESCUE_TRY 3
+/* Time(us) to boot MCU in recovery mode. */
+#define GXP_MCU_RECOVERY_BOOT_DELAY 100
/*
* Programs instruction remap CSRs.
*/
@@ -87,6 +90,11 @@
return true;
}
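+/* Writes @mode to the MCU boot-mode page, the last 4K page of the IREMAP data region. */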
+static void gxp_mcu_set_boot_mode(struct gxp_mcu_firmware *mcu_fw, uint32_t mode)
+{
+ writel(mode, GXP_MCU_BOOT_MODE_OFFSET + mcu_fw->image_buf.vaddr);
+}
+
static int gxp_mcu_firmware_handshake(struct gxp_mcu_firmware *mcu_fw)
{
struct gxp_dev *gxp = mcu_fw->gxp;
@@ -142,13 +150,55 @@
*
* Must be called with holding @mcu_fw->lock.
*
+ * @force: Force the MCU to boot in recovery mode and execute WFI so that it can
+ * go into PG state.
+ * Returns true if MCU successfully transited to PG state, otherwise false.
+ */
+static bool gxp_pg_by_recovery_boot(struct gxp_dev *gxp, bool force)
+{
+ struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
+ int try = MCU_RESCUE_TRY, ret;
+
+ lockdep_assert_held(&mcu_fw->lock);
+
+ do {
+ if (force) {
+ gxp_mcu_set_boot_mode(mcu_fw, GXP_MCU_BOOT_MODE_RECOVERY);
+ ret = gxp_mcu_reset(gxp, true);
+ udelay(GXP_MCU_RECOVERY_BOOT_DELAY);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to reset MCU (ret=%d)", ret);
+ continue;
+ }
+ }
+
+ if (gxp_lpm_wait_state_eq(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID), LPM_PG_STATE))
+ return true;
+
dev_warn(gxp->dev, "MCU PSM transition to PS3 failed, current state: %u, try: %d",
+ gxp_lpm_get_state(gxp, CORE_TO_PSM(GXP_MCU_CORE_ID)), try);
+ /*
+ * If PG transition fails, MCU will not fall into WFI after the reset.
+ * Therefore, we must boot into recovery to force WFI transition.
+ */
+ force = true;
+ } while (--try > 0);
+
+ return false;
+}
+
+/*
+ * Waits for the MCU LPM transition to the PG state.
+ *
+ * Must be called with holding @mcu_fw->lock.
+ *
* @ring_doorbell: If the situation is that the MCU cannot execute the transition by itself such
* as HW watchdog timeout, it must be passed as true to trigger the doorbell and
* let the MCU do that forcefully.
*
* Returns true if MCU successfully transited to PG state, otherwise false.
*/
-static bool wait_for_pg_state_locked(struct gxp_dev *gxp, bool ring_doorbell)
+static bool gxp_pg_by_doorbell(struct gxp_dev *gxp, bool ring_doorbell)
{
struct gxp_mcu *mcu = &to_mcu_dev(gxp)->mcu;
struct gxp_mcu_firmware *mcu_fw = gxp_mcu_firmware_of(gxp);
@@ -197,6 +247,19 @@
return false;
}
+static bool wait_for_pg_state_locked(struct gxp_dev *gxp, bool force)
+{
+ bool ret;
+
+ /* TODO(b/317756665): Remove when recovery mode boot is supported in firmware. */
+ ret = gxp_pg_by_doorbell(gxp, force);
+ if (ret)
+ return ret;
+
+ /* For firmware that supports recovery mode. */
+ return gxp_pg_by_recovery_boot(gxp, force);
+}
+
int gxp_mcu_firmware_load(struct gxp_dev *gxp, char *fw_name,
const struct firmware **fw)
{
@@ -317,6 +380,7 @@
return ret;
gxp_write_32(gxp, GXP_REG_MCU_BOOT_STAGE, 0);
+ gxp_mcu_set_boot_mode(mcu_fw, GXP_MCU_BOOT_MODE_NORMAL);
if (mcu_fw->is_secure) {
state = gsa_send_dsp_cmd(gxp->gsa_dev, GSA_DSP_START);
if (state != GSA_DSP_STATE_RUNNING) {
@@ -378,12 +442,6 @@
do {
dev_warn(gxp->dev, "Try to rescue MCU (try=%d)", try);
- /*
- * TODO(b/286179665): Currently, this function must not be called when MCU is in
- * PS0 state because GSA shutdown will be NO-OP and powering block down will cause
- * a kernel panic eventually. We need to ask the architecture team for sharing how
- * to forcefully transit MCU to PS3 state with us.
- */
if (!wait_for_pg_state_locked(gxp, true)) {
dev_err(gxp->dev,
"Cannot proceed MCU rescue because it is not in PG state");
@@ -406,7 +464,19 @@
continue;
}
- /* Try booting MCU up again and hanshaking with it. */
+#if GXP_LPM_IN_AON
+ /*
+ * MCU reset mechanisms are chip specific. For some chips, the reset assert bits may
+ * belong to the LPM CSR space, which does not get reset on a block power cycle and
+ * may still be held. Release the reset bits to arm the MCU for a run.
+ */
+ ret = gxp_mcu_reset(gxp, true);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to reset MCU after blk reboot (ret=%d)", ret);
+ continue;
+ }
+#endif
+ /* Try booting MCU up again and handshaking with it. */
ret = gxp_mcu_firmware_start(mcu_fw);
if (ret) {
dev_err(gxp->dev, "Failed to boot MCU up, (ret=%d)", ret);
@@ -419,7 +489,6 @@
ret);
continue;
}
-
dev_info(gxp->dev, "Succeeded in rescuing MCU");
} while (ret && --try > 0);
@@ -449,15 +518,10 @@
#else
/*
* Waits for the MCU to transition to PG state. If the KCI shutdown failed above (ret != 0), it
- * will wait for that with ringing the doorbell.
+ * will force the transition to PG state.
*/
- if (!wait_for_pg_state_locked(gxp, /*ring_doorbell=*/ret)) {
- dev_err(gxp->dev, "Failed to transit MCU to PG state after KCI shutdown");
- /*
- * TODO(b/286179665): Call rescue function and ring doorbell to transit MCU to PG
- * from here.
- */
- }
+ if (!wait_for_pg_state_locked(gxp, /*force=*/ret))
+ dev_warn(gxp->dev, "Failed to transit MCU to PG state after KCI shutdown");
#endif /* IS_ENABLED(CONFIG_GXP_GEM5) */
/* To test the case of the MCU FW sending FW_CRASH RKCI in the middle. */
@@ -513,6 +577,7 @@
dev_err(gxp->dev, "Failed to run MCU even after trying to rescue it: %d",
ret);
gxp_mcu_firmware_shutdown(mcu_fw);
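+ /* Force the MCU into PG state so the block can be powered down safely. */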
+ wait_for_pg_state_locked(gxp, true);
return ret;
}
}
@@ -901,14 +966,13 @@
/* Waits for the MCU to transition to PG state and restarts the MCU firmware. */
if (!wait_for_pg_state_locked(gxp, crash_type == GCIP_FW_CRASH_HW_WDG_TIMEOUT)) {
- dev_err(gxp->dev, "Failed to transit MCU LPM state to PG");
- /* TODO(b/286179665): Call rescue function from here. */
+ dev_warn(gxp->dev, "Failed to transit MCU LPM state to PG");
goto out;
}
ret = gxp_mcu_firmware_run_locked(mcu_fw);
if (ret)
- dev_err(gxp->dev, "Failed to run MCU firmware (ret=%d)", ret);
+ dev_warn(gxp->dev, "Failed to run MCU firmware (ret=%d)", ret);
out:
list_for_each_entry (client, &gxp->client_list, list_entry) {
diff --git a/gxp-mcu-firmware.h b/gxp-mcu-firmware.h
index 2b8d54c..739c38b 100644
--- a/gxp-mcu-firmware.h
+++ b/gxp-mcu-firmware.h
@@ -16,6 +16,9 @@
#include "gxp-internal.h"
+#define GXP_MCU_BOOT_MODE_NORMAL 0
+#define GXP_MCU_BOOT_MODE_RECOVERY 1
+
struct gxp_mcu_firmware {
struct gxp_dev *gxp;
/* resource for MCU firmware image */
diff --git a/gxp-uci.c b/gxp-uci.c
index 2dba6f5..2f2995f 100644
--- a/gxp-uci.c
+++ b/gxp-uci.c
@@ -227,6 +227,7 @@
{
struct gxp_uci_async_response *async_resp;
struct mailbox_resp_queue *mailbox_resp_queue;
+ unsigned long flags;
int i;
if (!awaiter)
@@ -236,16 +237,16 @@
mailbox_resp_queue = container_of(
async_resp->wait_queue, struct mailbox_resp_queue, wait_queue);
- spin_lock(async_resp->queue_lock);
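+ /* @queue_lock may also be taken from IRQ context; use the IRQ-safe variants (b/316074256). */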
+ spin_lock_irqsave(async_resp->queue_lock, flags);
if (mailbox_resp_queue->wait_queue_closed) {
- spin_unlock(async_resp->queue_lock);
+ spin_unlock_irqrestore(async_resp->queue_lock, flags);
return -EIO;
} else {
async_resp->awaiter = awaiter;
list_add_tail(&async_resp->wait_list_entry,
async_resp->wait_queue);
}
- spin_unlock(async_resp->queue_lock);
+ spin_unlock_irqrestore(async_resp->queue_lock, flags);
if (async_resp->out_fences) {
for (i = 0; i < async_resp->out_fences->size; i++)
diff --git a/gxp.h b/gxp.h
index fde8701..c483cd4 100644
--- a/gxp.h
+++ b/gxp.h
@@ -13,7 +13,7 @@
/* Interface Version */
#define GXP_INTERFACE_VERSION_MAJOR 1
-#define GXP_INTERFACE_VERSION_MINOR 22
+#define GXP_INTERFACE_VERSION_MINOR 23
#define GXP_INTERFACE_VERSION_BUILD 0
/* mmap offsets for MCU logging and tracing buffers */